X-Git-Url: https://git.martlubbers.net/?a=blobdiff_plain;ds=sidebyside;f=asr.tex;h=9cdbaf5582067cf91e3a6b83b320d68e1fbf6ded;hb=5945b2bce63d92454882cb7c66fb1c8d87c3a271;hp=e4c126d0b8ff4127ba2decbdf71f98b9d7969336;hpb=31144fdff5a32d804a963dbfdefd8faf6c0ba756;p=asr1617.git

diff --git a/asr.tex b/asr.tex
index e4c126d..9cdbaf5 100644
--- a/asr.tex
+++ b/asr.tex
@@ -1,37 +1,89 @@
 %&asr
+\usepackage[toc,nonumberlist,acronyms]{glossaries}
+\makeglossaries%
+\newacronym{ANN}{ANN}{Artificial Neural Network}
+\newacronym{HMM}{HMM}{Hidden Markov Model}
+\newacronym{GMM}{GMM}{Gaussian Mixture Model}
+\newacronym{DHMM}{DHMM}{Duration-explicit \acrlong{HMM}}
+\newacronym{HTK}{HTK}{\acrlong{HMM} Toolkit}
+\newacronym{FA}{FA}{Forced alignment}
+\newacronym{MFC}{MFC}{Mel-frequency cepstrum}
+\newacronym{MFCC}{MFCC}{\acrlong{MFC} coefficient}
+\newacronym{PPF}{PPF}{Posterior Probability Features}
+\newacronym{MLP}{MLP}{Multi-layer Perceptron}
+\newacronym{PLP}{PLP}{Perceptual Linear Prediction}
+\newacronym{ZCR}{ZCR}{Zero-crossing Rate}
+\newacronym{LPC}{LPC}{Linear Prediction Coefficients}
+\newacronym{LPCC}{LPCC}{\acrlong{LPC} derived cepstrum}
+\newacronym{IFPI}{IFPI}{International Federation of the Phonographic Industry}
+\newglossaryentry{dm}{name={Death Metal},
+	description={is an extreme heavy metal music style with growling vocals and
+	pounding drums}}
+\newglossaryentry{dom}{name={Doom Metal},
+	description={is an extreme heavy metal music style with growling vocals and
+	pounding drums played very slowly}}
+\newglossaryentry{FT}{name={Fourier Transform},
+	description={is a technique for converting a signal from a time
+	representation to a frequency representation}}
+\newglossaryentry{MS}{name={Mel-Scale},
+	description={is a human-ear-inspired scale for spectral signals}}
+\newglossaryentry{Viterbi}{name={Viterbi},
+	description={is a dynamic programming algorithm for finding the most likely
+	sequence of hidden states in a \gls{HMM}}}
+
 \begin{document}
-%Titlepage
+\frontmatter{}
+
 \maketitleru[
 	course={(Automatic) Speech Recognition},
 	institute={Radboud University Nijmegen},
-	authorstext={Author:}]
+	authorstext={Author:},
+	pagenr=1]
 \listoftodos[Todo]
 
-t\cite{muller_multimodal_2012}
-
-t\cite{pedone_phoneme-level_2011}
-
-t\cite{fujihara_automatic_2006}
-
-t\cite{mesaros_adaptation_2009}
-
-t\cite{mesaros_automatic_2010}
-
-t\cite{dzhambazov_automatic_2016}
+\tableofcontents
 
-t\cite{mesaros_automatic_2008}
+\mainmatter{}
+%Berenzweig and Ellis use acoustic classifiers from speech recognition as a
+%detector for singing lines. They achieve 80\% accuracy on forty 15-second
+%excerpts. They cite earlier work on signal features that discriminate
+%between speech and music, and use neural networks and
+%\glspl{HMM}~\cite{berenzweig_locating_2001}.
+%
+%In 2014 Dzhambazov et al.\ applied state-of-the-art segmentation methods to
+%polyphonic Turkish music; this might be interesting to use for heavy metal.
+%They mention that Fujihara (2011) has a similar \gls{FA} system. This method
+%uses phone-level segmentation and the first 12 \glspl{MFCC}. They first do
+%vocal/non-vocal detection, then melody extraction, then alignment. They
+%compare results with Mesaros \& Virtanen, 2008~\cite{dzhambazov_automatic_2014}.
+%Later they specialized in long syllables in a cappella singing. They use
+%\glspl{DHMM} with \glspl{GMM} and show that adding knowledge improves
+%alignment (Beijing opera has long syllables)~\cite{dzhambazov_automatic_2016}.
+%
 
-t\cite{berenzweig_locating_2001}
 
-t\cite{dzhambazov_automatic_2014}
+%Introduction, leading to a clearly defined research question
+\chapter{Introduction}
+\input{intro.tex}
 
-t\cite{fujihara_three_2008}
+\chapter{Methods}
+\input{methods.tex}
 
-t\cite{yang_machine_2012}
+\chapter{Conclusion \& Discussion}
+\input{conclusion.tex}
 
-t\cite{fujihara_lyricsynchronizer:_2011}
+%(Appendices)
+\appendix
+\input{appendices.tex}
 
-t\cite{mauch_integrating_2012}
+\newpage
+%Glossaries
+\glsaddall{}
+\begingroup
+\let\clearpage\relax
+\let\cleardoublepage\relax
+\printglossaries{}
+\endgroup
 
 \bibliographystyle{ieeetr}
 \bibliography{asr}
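
For reference, a minimal compilable sketch of how the acronym machinery added
in the preamble above behaves in body text. Only the HMM entry is taken from
the patch; the surrounding scaffolding is an assumption for illustration and
not part of the patch:

\documentclass{article}
\usepackage[acronyms]{glossaries}
\makeglossaries%
\newacronym{HMM}{HMM}{Hidden Markov Model}

\begin{document}
% First use prints the long form plus abbreviation:
% "Hidden Markov Model (HMM)".
A \gls{HMM} models a sequence of hidden states.
% Subsequent uses print only the short form "HMM";
% \glspl{} produces the plural "HMMs".
Two \glspl{HMM} can be chained.
% \acrfull{} forces the full form regardless of first-use state.
The term \acrfull{HMM} is spelled out here once more.
% A makeglossaries run between two latex runs is needed to
% actually typeset the list.
\printglossaries{}
\end{document}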
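
The Fourier Transform glossary entry describes converting a signal from a time
representation to a frequency representation; for reference (not part of the
patch), the standard continuous transform is

\begin{equation}
	X(f) = \int_{-\infty}^{\infty} x(t)\,e^{-2\pi ift}\,\mathrm{d}t,
\end{equation}

where $x(t)$ is the time-domain signal and $X(f)$ its content at frequency $f$.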
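
The Mel-Scale entry can likewise be made concrete with a formula; one commonly
used variant (several exist in the literature; this one is added for reference
only) maps a frequency $f$ in Hz to mels as

\begin{equation}
	m = 2595\log_{10}\!\left(1 + \frac{f}{700}\right).
\end{equation}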
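
The Viterbi entry names a dynamic programming algorithm over \gls{HMM} states;
its standard recurrence (reference material, not in the patch), with initial
probabilities $\pi_i$, transition probabilities $a_{ij}$ and emission
probabilities $b_j(o_t)$ for observation $o_t$, is

\begin{align}
	\delta_1(i) &= \pi_i\,b_i(o_1),\\
	\delta_t(j) &= \max_i\bigl[\delta_{t-1}(i)\,a_{ij}\bigr]\,b_j(o_t),
	\qquad t > 1,
\end{align}

where the most likely state sequence is recovered by backtracking the
maximizing $i$ at every step.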