@article{oai:nagoya.repo.nii.ac.jp:00013223,
  author  = {Ohno, Tomohiro and Matsubara, Shigeki and Kashioka, Hideki and Maruyama, Takehiko and Inagaki, Yasuyoshi},
  journal = {Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics},
  month   = {Jul},
  note    = {Spoken monologues feature greater sentence length and structural complexity than spoken dialogues. To achieve high parsing performance for spoken monologues, it could prove effective to simplify the structure by dividing a sentence into suitable language units. This paper proposes a method for dependency parsing of Japanese monologues based on sentence segmentation. In this method, dependency parsing is executed in two stages: at the clause level and at the sentence level. First, the dependencies within a clause are identified by dividing a sentence into clauses and executing stochastic dependency parsing for each clause. Next, the dependencies over clause boundaries are identified stochastically, and the dependency structure of the entire sentence is thus completed. An experiment using a spoken monologue corpus shows this method to be effective for efficient dependency parsing of Japanese monologue sentences.},
  pages   = {169--176},
  title   = {Dependency Parsing of Japanese Spoken Monologue Based on Clause Boundaries},
  year    = {2006}
}