update typos
LAPTOP-BA1MV7FC\Vanessa committed Jan 14, 2024
1 parent ccc199d commit bc40678
Showing 1 changed file with 7 additions and 7 deletions.
14 changes: 7 additions & 7 deletions _bibliography/papers.bib
@@ -53,7 +53,7 @@ @article{mata
title={MATA: Mission, Attitude, and Telemetry Analysis Software for Micro-Satellites},
abstract={With the rise in popularity of small satellites, there has been an increasing demand for a software tool that covers different stages of satellite development. In this paper, we extend a small satellite simulation software originally developed for the earth-observation satellites Diwata-1 and Diwata-2 to support other satellite missions. This support covers various stages: from ideation and development up to post-launch assessment. This paper focuses on the Mission, Attitude, and Telemetry Analysis (MATA) software, which can simulate orbit, attitude, and camera views from planned earth-observation missions. Satellite engineers can also use MATA in a hardware-in-the-loop configuration, serving as one of the last functionality checks before launching the satellite. MATA can also read telemetry files from an orbiting satellite and re-project them in a virtual environment for a more intuitive assessment. This paper also covers the implemented framework for the simulator. This framework would help future developers extend the simulator to other applications like star tracking simulations, mixed reality satellite training, and space educational software.},
author={Tan, V.* and Labrador, J.L.* and Talampas, M.C.},
-journal={IEEE REGION 10 CONFERENCE (TENCON),},
+journal={IEEE REGION 10 CONFERENCE (TENCON)},
year={2020},
pages={614--619},
doi={10.1109/TENCON50793.2020.9293937},
@@ -67,7 +67,7 @@ @article{multitask
title={Multi-task Learning for Detection, Recovery, and Separation of Polyphonic Music},
abstract={Music separation aims to extract the signals of individual sources from a given audio mixture. Recent studies have explored the use of deep learning algorithms for this problem. Although these algorithms have proven to have good performance, they are inefficient as they need to learn an independent model for each sound source. In this study, we demonstrate a multi-task learning system for music separation, detection, and recovery. The proposed system separates polyphonic music into four sound sources using a single model. It also detects the presence of a source in the given mixture. Lastly, it reconstructs the input mixture to help the network further learn the audio representation. Our novel approach exploits the shared information in each task, thus improving the separation performance of the system. It was determined that the best configuration for multi-task learning is to separate the sources first, followed by parallel modules for classification and recovery. Quantitative and qualitative results show that the performance of our system is comparable to baselines for separation and classification.},
author={Tan, V. and De Leon, F.},
-journal={IEEE REGION 10 CONFERENCE (TENCON),},
+journal={IEEE REGION 10 CONFERENCE (TENCON)},
year={2020},
pages={1112--1117},
doi={10.1109/TENCON50793.2020.9293783},
@@ -81,7 +81,7 @@ @article{ismac
title={Time-Frequency Representations for Single-Channel Music Source Separation},
abstract={Inspired by the success of image classification and speech recognition, deep learning algorithms have been explored to solve music source separation. Solving this problem would open up a wide range of applications like automatic transcription, audio post-production, and many more. Most algorithms use the Short Time Fourier Transform (STFT) as the Time-Frequency (T-F) input representation. Each deep learning model has a different configuration for STFT. There is no standard set of STFT parameters used in solving music source separation. This paper explores the different parameters for STFT and investigates another representation, the Constant-Q Transform (CQT), in separating three individual sound sources. Results of experiments show that dilated convolutional layers work well for STFT while normal convolutional layers work well for CQT. The best T-F representation for music source separation is STFT with dilated CNNs and a soft masking method. Furthermore, researchers should still consider the parameters of the T-F representations to have better performance for their deep learning models.},
author={Tan, V. and de Leon, F.},
-journal={International Symposium on Multimedia and Communication Technology (ISMAC),},
+journal={International Symposium on Multimedia and Communication Technology (ISMAC)},
year={2019},
pages={1--6},
doi={10.1109/ISMAC.2019.8836141},
@@ -94,7 +94,7 @@ @article{wicon
title={Audio Event Detection Using Wireless Sensor Networks Based on Deep Learning},
abstract={A wireless acoustic sensor network is useful for ambient assisted living (AAL) applications. Its capability of incorporating an audio event detection and classification system helps its users, especially the elderly, with their everyday needs. In this paper, we propose using convolutional neural networks (CNN) for classifying audio streams. In contrast to AAL systems using traditional machine learning, our solution is capable of learning and inferring activities in an end-to-end manner. To demonstrate the system, we developed a wireless sensor network composed of Raspberry Pi boards with microphones as nodes. The audio classification system achieves an accuracy of 83.79% using a parallel network on the Urban8k dataset, extracting constant-Q transform (CQT) features as system inputs. The overall system is scalable and flexible in terms of the number of nodes, hence it is applicable to wide areas where assisted living applications are utilized.},
author={Mendoza, J.M.* and Tan, V.* and Fuentes, V. and Perez, G. and Tiglao, N.M.},
-journal={Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering,},
+journal={Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering},
year={2019},
pages={105--115},
publisher={Springer, Cham},
@@ -107,7 +107,7 @@ @article{vrex
title={Vrex: A Framework for Immersive Virtual Reality Experiences},
abstract={Virtual Reality (VR) is believed to be the future of gaming and even application platforms. However, creating a VR application from scratch takes a lot of time and research. Virtual Reality frameworks simplify game development by allowing the developer to focus on the actual design and system rather than dealing with the core functionalities and interactions of a VR application. In this paper, we present a Virtual Reality framework using Unity3D and the HTC Vive. With this framework, any developer can easily create a VR environment with interactions, scene objectives, player explorations, and many more. This framework is used in the creation of the adventure fantasy game, Eldervine, and adapted for the scene creator application, ANEEME. Results of the experiments conducted show the framework's usability in creating different VR applications and its capability to make the interactions intuitive and the experience immersive.},
author={Blonna, R. and Tan, M.S. and Tan, V. and Mora, A.P. and Atienza, R.},
-journal={IEEE Region Ten Symposium (Tensymp),},
+journal={IEEE Region Ten Symposium (Tensymp)},
year={2018},
pages={118--123},
doi={10.1109/TENCONSpring.2018.8692018},
@@ -121,7 +121,7 @@ @article{aneeme
title={ANEEME: Synthesizing and Sharing Animation Building Blocks for Rapid Creation of 3D Virtual Scenes},
abstract={ANEEME focuses on building technologies that rapidly synthesize animated visual scenes. This virtual reality experience immerses users in the different styles and cultures of houses around the world. There are two modes in this application: build mode and play mode. In build mode, users can integrate 3D models from the local computer or from ANEEME's online repository into the virtual environment. Using ANEEME's automatic skeletal rigging, users can easily incorporate and animate humanoid objects such as toys, figurines, and even their own avatar. These features enable users to design, build, and customize their dream house. During play mode, users can interact with the objects inside the environment. They can watch videos, listen to music, play with different instruments, and many more. Photo and video capture capabilities available in this mode also allow users to easily share their virtual environment through their social media accounts.},
author={Tan, V. and Atienza, R. and Saludares, M.I. and Casimiro, J. and Viola, M.S.},
-journal={SIGGRAPH Asia VR Showcase,},
+journal={SIGGRAPH Asia VR Showcase},
year={2017},
pages={1--2},
doi={10.1145/3139468.3139479},
@@ -136,7 +136,7 @@ @article{bs
title={Study of Automatic Melody Extraction Methods for Philippine Indigenous Music},
abstract={In this study, we compared two methods for extracting the melody pitch from select Philippine indigenous music. Pitch is expressed as the fundamental frequency of the main melodic voice or lead instrument of a music sample. Our implementation of automatic melody extraction involves blind source separation and pitch detection. For blind source separation, we implemented the Harmonic-Percussive Source Separation (HPSS) algorithm and the Shifted Non-negative Matrix Factorization (SNMF) algorithm. The HPSS algorithm identifies the harmonic component from the prominent peaks in the spectrogram of a signal while the SNMF algorithm uses timbre as its criterion. The harmonic component is used to estimate the melody pitch. The HPSS and SNMF source separation algorithms are complemented with salience-based and data-driven pitch detection algorithms, respectively. The two systems are evaluated using ten samples of Philippine indigenous music. After source separation, the estimated harmonic and percussive tracks were evaluated through subjective listening tests. Results from the subjective tests show that SNMF performs better than HPSS for harmonic and percussive source separation. Moreover, objective tests using standard metrics indicate that the salience-based approach has higher accuracy in identifying the melody than the data-driven approach.},
author={Disuanco, J.* and Tan, V.* and de Leon, F.},
-journal={IEEE International Conference on Control System, Computing and Engineering (ICCSCE),},
+journal={IEEE International Conference on Control System, Computing and Engineering (ICCSCE)},
year={2015},
pages={464--469},
doi={10.1109/ICCSCE.2015.7482230},
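
All seven fixes in this commit remove the same typo: a stray comma sitting just before the closing brace of a journal field. A minimal sketch of how that pattern could be stripped mechanically, assuming the papers.bib path shown in this diff and that no field value legitimately ends with ",}" (both are assumptions, not part of the commit):

import re
from pathlib import Path

# Path taken from this commit's diff.
bib_path = Path("_bibliography/papers.bib")

# The typo fixed in this commit: a comma directly before the closing
# brace of a field value, e.g. journal={... (TENCON),},
# Assumes no field value legitimately ends with ",}".
stray_comma = re.compile(r",\}")

text = bib_path.read_text(encoding="utf-8")
fixed, count = stray_comma.subn("}", text)
if count:
    bib_path.write_text(fixed, encoding="utf-8")
print(f"removed {count} stray comma(s)")

Run from the repository root against the pre-commit file, this should report the seven removals shown in this diff and rewrite the file in place.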
