% literatura.bib -- bibliography database for the thesis
@article{wang2023wamf,
  title     = {{WAMF-FPI}: A Weight-Adaptive Multi-Feature Fusion Network for {UAV} Localization},
  author    = {Wang, Guirong and Chen, Jiahao and Dai, Ming and Zheng, Enhui},
  journal   = {Remote Sensing},
  volume    = {15},
  number    = {4},
  pages     = {910},
  year      = {2023},
  publisher = {MDPI},
}
@article{bahdanau2015neural,
  title         = {Neural Machine Translation by Jointly Learning to Align and Translate},
  author        = {Bahdanau, Dzmitry and Cho, Kyunghyun and Bengio, Yoshua},
  journal       = {arXiv preprint arXiv:1409.0473},
  eprint        = {1409.0473},
  archiveprefix = {arXiv},
  year          = {2015},
}
@article{dai2022finding,
  title         = {Finding Point with Image: An End-to-End Benchmark for Vision-based {UAV} Localization},
  author        = {Dai, Ming and Chen, Jiahao and Lu, Yusheng and Hao, Wenlong and Zheng, Enhui},
  journal       = {arXiv preprint arXiv:2208.06561},
  eprint        = {2208.06561},
  archiveprefix = {arXiv},
  year          = {2022},
}
@article{vaswani2017attention,
  title   = {Attention Is All You Need},
  author  = {Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N. and Kaiser, {\L}ukasz and Polosukhin, Illia},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {30},
  year    = {2017},
}
@inproceedings{liu2021swin,
  title     = {{Swin} Transformer: Hierarchical Vision Transformer Using Shifted Windows},
  author    = {Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
  booktitle = {Proceedings of the {IEEE/CVF} International Conference on Computer Vision},
  pages     = {10012--10022},
  year      = {2021},
}
@article{chu2021twins,
  author  = {Chu, Xiangxiang and Tian, Zhi and Wang, Yuqing and Zhang, Bo and Ren, Haibing and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua},
  title   = {Twins: Revisiting the design of spatial attention in vision transformers},
  journal = {Advances in Neural Information Processing Systems},
  volume  = {34},
  pages   = {9355--9366},
  year    = {2021},
}
@article{chu2021conditional,
  title         = {Conditional Positional Encodings for Vision Transformers},
  author        = {Chu, Xiangxiang and Tian, Zhi and Zhang, Bo and Wang, Xinlong and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua},
  journal       = {arXiv preprint arXiv:2102.10882},
  eprint        = {2102.10882},
  archiveprefix = {arXiv},
  year          = {2021},
}
@article{vit,
  title         = {An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
  author        = {Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
  journal       = {arXiv preprint arXiv:2010.11929},
  eprint        = {2010.11929},
  archiveprefix = {arXiv},
  year          = {2020},
}
@article{wang2021pyramid,
  title         = {{Pyramid Vision Transformer}: A Versatile Backbone for Dense Prediction without Convolutions},
  author        = {Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling},
  journal       = {arXiv preprint arXiv:2102.12122},
  eprint        = {2102.12122},
  archiveprefix = {arXiv},
  year          = {2021},
}
@article{targ2016resnet,
  title         = {{Resnet} in {Resnet}: Generalizing Residual Architectures},
  author        = {Targ, Sasha and Almeida, Diogo and Lyman, Kevin},
  journal       = {arXiv preprint arXiv:1603.08029},
  eprint        = {1603.08029},
  archiveprefix = {arXiv},
  year          = {2016},
}
% figures
@misc{analyticsvidhya2022rnn,
  author = {{Analytics Vidhya}},
  title  = {A Brief Overview of {Recurrent Neural Networks} ({RNN})},
  year   = {2022},
  url    = {https://www.analyticsvidhya.com/blog/2022/03/a-brief-overview-of-recurrent-neural-networks-rnn/},
  note   = {Accessed: 2023-08-05},
}