From 4bec4159033e5e96c12d8edbaa59ee2130a0cbe2 Mon Sep 17 00:00:00 2001 From: Chiara Braghin Date: Thu, 13 Jun 2024 14:58:29 +0200 Subject: [PATCH] inserted DOIs to biblio --- bib_on_BigDataAccessControl.bib | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/bib_on_BigDataAccessControl.bib b/bib_on_BigDataAccessControl.bib index cff71e3..b2bee26 100644 --- a/bib_on_BigDataAccessControl.bib +++ b/bib_on_BigDataAccessControl.bib @@ -1,4 +1,5 @@ % Access Control + @inproceedings{Sandhu:ABAC:2018, title = {{A}n {A}ttribute-{B}ased {A}ccess {C}ontrol {M}odel for {S}ecure {B}ig {D}ata {P}rocessing in {H}adoop {E}cosystem}, author = {Gupta, Maanak and Patwa, Farhan and Sandhu, Ravi}, @@ -159,6 +160,7 @@ @inproceedings{IoTSecurity volume = {}, number = {}, pages = {1711--1715}, + doi = {10.1109/TrustCom/BigDataSE.2018.00257} } @inproceedings{GuptaSandu:2017, title = {{O}bject-{T}agged {RBAC} {M}odel for the {H}adoop {E}cosystem}, @@ -180,6 +182,7 @@ @inproceedings{MultipartyAC:2019 volume = {}, number = {}, pages = {29--38}, + doi = {10.1109/EuroSPW.2019.00010} } @article{vassiliadis2009survey, title = {A survey of extract--transform--load technology}, @@ -649,6 +652,8 @@ @inproceedings{Fuglede author = {Fuglede, B. and Topsoe, F.}, booktitle = {International Symposium onInformation Theory, 2004. ISIT 2004. 
Proceedings.}, publisher = {IEEE}, + doi = {10.1109/isit.2004.1365067}, + url = {https://doi.org/10.1109/ISIT.2004.1365067} } %%%%%%% FROM MEDES2 1 %%%%%%% @article{castano2017exploratory, @@ -686,6 +691,9 @@ @article{RAHMAN20102707 volume = 1, number = 1, pages = {2707--2716}, + doi = {10.1016/j.procs.2010.04.304}, + issn = {1877-0509}, + url = {https://www.sciencedirect.com/science/article/pii/S1877050910003054}, note = {ICCS 2010}, keywords = {Availability, Data mining, Forecasting, Enterprise grid, Jaccard Index}, abstract = {Enterprise Grid enables sharing and aggregation of a set of computing or storage resources connected by enterprise network, but the availability of the resources in this environment varies widely. Thus accurate prediction of the availability of these resources can significantly improve the performance of executing compute-intensive complex scientific and business applications in enterprise Grid environment by avoiding possible runtime failures. In this paper, we propose a Jaccard Index based prediction approach utilizing lazy learning algorithm that searches for a best match of a sequence pattern in the historical data in order to predict the availability of a particular machine in the system. We compare it against three other well known availability prediction techniques using simulation based study. 
The experimental results show that our Jaccard Index based prediction approach achieves better prediction accuracy with reduced computational complexity when compared to other similar techniques.} @@ -700,6 +708,7 @@ @INPROCEEDINGS{7014544 number={}, pages={1-7}, keywords={Data structures;Boolean functions;Cryptography;Databases;Access Control;Authorization;Big Data;Distributed System}, + doi={10.4108/icst.collaboratecom.2014.257649} } @InProceedings{dataProtection, @@ -738,7 +747,7 @@ @ARTICLE{8863330 number={}, pages={147420-147452}, keywords={Cloud computing;Access control;Encryption;Privacy;Data privacy;Edge computing;Cloud computing;privacy;access control;attribute-based encryption;trust}, - } + doi={10.1109/ACCESS.2019.2946185}} @article{Majeed2021AnonymizationTF, title={Anonymization Techniques for Privacy Preserving Data Publishing: A Comprehensive Survey}, @@ -759,6 +768,7 @@ @article{dataAccuracy pages = {5--33}, year = {1996}, publisher = {Routledge}, +doi = {10.1080/07421222.1996.11518099}, } @article{dataQuality, @@ -771,6 +781,8 @@ @article{dataQuality volume = {41}, number = {2}, issn = {0001-0782}, +url = {https://doi.org/10.1145/269012.269021}, +doi = {10.1145/269012.269021}, journal = {Commun. 
ACM}, month = {feb}, pages = {54–57}, @@ -782,6 +794,7 @@ @Article{impetusPaper title = "{Balancing Protection and Quality in Big Data Analytics Pipelines}", journal = "Big Data", year = "2024", +doi = {10.1089/big.2023.0065}, } @book{bookMetrics, @@ -837,6 +850,9 @@ @article{VANDENBROEK2018330 volume = {129}, pages = {330-338}, year = {2018}, +issn = {0040-1625}, +doi = {10.1016/j.techfore.2017.09.040}, +url = {https://www.sciencedirect.com/science/article/pii/S0040162517314695}, author = {Tijs {van den Broek} and Anne Fleur {van Veenstra}}, keywords = {Disruptive innovation, Data protection regulation, Big data, Governance, Inter-organizational collaboration}, abstract = {Big data is an important driver of disruptive innovation that may increase organizations' competitive advantage. To create innovative data combinations and decrease investments, big data is often shared among organizations, crossing organizational boundaries. However, these big data collaborations need to balance disruptive innovation and compliance to a strict data protection regime in the EU. This paper investigates how inter-organizational big data collaborations arrange and govern their activities in the context of this dilemma. We conceptualize big data as inter-organizational systems and build on IS and Organization Theory literature to develop four archetypical governance arrangements: Market, Hierarchy, Bazaar and Network. Subsequently, these arrangements are investigated in four big data collaboration use cases. The contributions of this study to literature are threefold. First, we conceptualize the organization behind big data collaborations as IOS governance. Second, we show that the choice for an inter-organizational governance arrangement highly depends on the institutional pressure from regulation and the type of data that is shared. In this way, we contribute to the limited body of research on the antecedents of IOS governance. 
Last, we highlight with four use cases how the principles of big data, specifically data maximization, clash with the principles of EU data protection regulation. Practically, our study provides guidelines for IT and innovation managers how to arrange and govern the sharing of data among multiple organizations.} @@ -850,6 +866,7 @@ @article{needtobalance title = {Balancing data privacy and usability in the federal statistical system}, volume = {119}, journal = {Proceedings of the National Academy of Sciences}, + doi = {10.1073/pnas.2104906119} } @INPROCEEDINGS{secureWScomposition, @@ -861,6 +878,7 @@ @INPROCEEDINGS{secureWScomposition number={}, pages={489-496}, keywords={Security;Web services;XML;Simple object access protocol;Service oriented architecture;Privacy;Software systems;Software design;Web and internet services;Computer architecture}, + doi={10.1109/ICWS.2006.115} } @article{SELLAMI2020102732, @@ -870,6 +888,8 @@ @article{SELLAMI2020102732 pages = {102732}, year = {2020}, issn = {1084-8045}, + doi = {10.1016/j.jnca.2020.102732}, + url = {https://www.sciencedirect.com/science/article/pii/S108480452030206X}, author = {Mokhtar Sellami and Haithem Mezni and Mohand Said Hacid}, keywords = {Big data, Big service, Big service composition, Quality of big services, Fuzzy RCA, Spark}, abstract = {Over the last years, big data has emerged as a new paradigm for the processing and analysis of massive volumes of data. Big data processing has been combined with service and cloud computing, leading to a new class of services called “Big Services”. In this new model, services can be seen as an abstract layer that hides the complexity of the processed big data. To meet users' complex and heterogeneous needs in the era of big data, service reuse is a natural and efficient means that helps orchestrating available services' operations, to provide customer on-demand big services. 
However different from traditional Web service composition, composing big services refers to the reuse of, not only existing high-quality services, but also high-quality data sources, while taking into account their security constraints (e.g., data provenance, threat level and data leakage). Moreover, composing heterogeneous and large-scale data-centric services faces several challenges, apart from security risks, such as the big services' high execution time and the incompatibility between providers' policies across multiple domains and clouds. Aiming to solve the above issues, we propose a scalable approach for big service composition, which considers not only the quality of reused services (QoS), but also the quality of their consumed data sources (QoD). Since the correct representation of big services requirements is the first step towards an effective composition, we first propose a quality model for big services and we quantify the data breaches using L-Severity metrics. Then to facilitate processing and mining big services' related information during composition, we exploit the strong mathematical foundation of fuzzy Relational Concept Analysis (fuzzy RCA) to build the big services' repository as a lattice family. We also used fuzzy RCA to cluster services and data sources based on various criteria, including their quality levels, their domains, and the relationships between them. Finally, we define algorithms that parse the lattice family to select and compose high-quality and secure big services in a parallel fashion. 
The proposed method, which is implemented on top of Spark big data framework, is compared with two existing approaches, and experimental studies proved the effectiveness of our big service composition approach in terms of QoD-aware composition, scalability, and security breaches.} @@ -884,6 +904,7 @@ @ARTICLE{9844845 number={3}, pages={1999-2012}, keywords={Software;Behavioral sciences;Microservice architectures;Art;Monitoring;Focusing;Codes;Assurance;certification;security;service selection}, + doi={10.1109/TSC.2022.3195071} } @InProceedings{Lseverity, @@ -935,6 +956,9 @@ @Inbook{Kellerer2004 address="Berlin, Heidelberg", pages="235--283", abstract="In this first chapter of extensions and generalizations of the basic knapsack problem (KP) we will add additional constraints to the single weight constraint (1.2) thus attaining the multidimensional knapsack problem. After the introduction we will deal extensively with relaxations and reductions in Section 9.2. Exact algorithms to compute optimal solutions will be covered in Section 9.3 followed by results on approximation in Section 9.4. A detailed treatment of heuristic methods will be given in Section 9.5. Separate sections are devoted to two special cases, namely the two-dimensional knapsack problem (Section 9.6) and the cardinality constrained knapsack problem (Section 9.7). Finally, we will consider the combination of multiple constraints and multiple-choice selection of items from classes (see Chapter 11 for the one-dimensional case) in Section 9.8.", +isbn="978-3-540-24777-7", +doi="10.1007/978-3-540-24777-7_9", +url="https://doi.org/10.1007/978-3-540-24777-7_9" }