// aws.ts: Fig completion spec for the AWS CLI
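/**
 * Suggests AWS CLI profile names (e.g. for `--profile`) by running
 * `aws configure list-profiles` and mapping each output line to a
 * suggestion. Results are cached per working directory and served stale
 * while a refresh runs in the background ("stale-while-revalidate").
 */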
export const awsProfileGenerator: Fig.Generator = {
cache: {
strategy: "stale-while-revalidate",
cacheByDirectory: true,
},
script: ["aws", "configure", "list-profiles"],
postProcess: function (out) {
if (out.trim() === "") {
return [];
}
return out.split("\n").map((line) => ({
name: line,
icon: "👤",
}));
},
};
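// For reference, given hypothetical `aws configure list-profiles` output of:
//   default
//   prod
// postProcess above returns:
//   [{ name: "default", icon: "👤" }, { name: "prod", icon: "👤" }]

// Top-level spec: a single global --profile option plus one subcommand per
// AWS service; each subcommand lazily loads its full spec via loadSpec.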
const completionSpec: Fig.Spec = {
name: "aws",
options: [
{
name: "--profile",
description: "Use a specific profile from your credential file",
args: {
generators: awsProfileGenerator,
filterStrategy: "fuzzy",
},
},
],
subcommands: [
{
name: "accessanalyzer",
description:
"Identity and Access Management Access Analyzer helps you to set, verify, and refine your IAM policies by providing a suite of capabilities. Its features include findings for external and unused access, basic and custom policy checks for validating policies, and policy generation to generate fine-grained policies. To start using IAM Access Analyzer to identify external or unused access, you first need to create an analyzer. External access analyzers help identify potential risks of accessing resources by enabling you to identify any resource policies that grant access to an external principal. It does this by using logic-based reasoning to analyze resource-based policies in your Amazon Web Services environment. An external principal can be another Amazon Web Services account, a root user, an IAM user or role, a federated user, an Amazon Web Services service, or an anonymous user. You can also use IAM Access Analyzer to preview public and cross-account access to your resources before deploying permissions changes. Unused access analyzers help identify potential identity access risks by enabling you to identify unused IAM roles, unused access keys, unused console passwords, and IAM principals with unused service and action-level permissions. Beyond findings, IAM Access Analyzer provides basic and custom policy checks to validate IAM policies before deploying permissions changes. You can use policy generation to refine permissions by attaching a policy generated using access activity logged in CloudTrail logs. This guide describes the IAM Access Analyzer operations that you can call programmatically. For general information about IAM Access Analyzer, see Identity and Access Management Access Analyzer in the IAM User Guide",
loadSpec: "aws/accessanalyzer",
},
{
name: "account",
description: "Operations for Amazon Web Services Account Management",
loadSpec: "aws/account",
},
{
name: "acm",
description:
"Certificate Manager You can use Certificate Manager (ACM) to manage SSL/TLS certificates for your Amazon Web Services-based websites and applications. For more information about using ACM, see the Certificate Manager User Guide",
loadSpec: "aws/acm",
},
{
name: "acm-pca",
description:
"This is the Amazon Web Services Private Certificate Authority API Reference. It provides descriptions, syntax, and usage examples for each of the actions and data types involved in creating and managing a private certificate authority (CA) for your organization. The documentation for each action shows the API request parameters and the JSON response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you prefer. For more information, see Amazon Web Services SDKs. Each Amazon Web Services Private CA API operation has a quota that determines the number of times the operation can be called per second. Amazon Web Services Private CA throttles API requests at different rates depending on the operation. Throttling means that Amazon Web Services Private CA rejects an otherwise valid request because the request exceeds the operation's quota for the number of requests per second. When a request is throttled, Amazon Web Services Private CA returns a ThrottlingException error. Amazon Web Services Private CA does not guarantee a minimum request rate for APIs. To see an up-to-date list of your Amazon Web Services Private CA quotas, or to request a quota increase, log into your Amazon Web Services account and visit the Service Quotas console",
loadSpec: "aws/acm-pca",
},
{
name: "amp",
description:
"Amazon Managed Service for Prometheus is a serverless, Prometheus-compatible monitoring service for container metrics that makes it easier to securely monitor container environments at scale. With Amazon Managed Service for Prometheus, you can use the same open-source Prometheus data model and query language that you use today to monitor the performance of your containerized workloads, and also enjoy improved scalability, availability, and security without having to manage the underlying infrastructure. For more information about Amazon Managed Service for Prometheus, see the Amazon Managed Service for Prometheus User Guide. Amazon Managed Service for Prometheus includes two APIs. Use the Amazon Web Services API described in this guide to manage Amazon Managed Service for Prometheus resources, such as workspaces, rule groups, and alert managers. Use the Prometheus-compatible API to work within your Prometheus workspace",
loadSpec: "aws/amp",
},
{
name: "amplify",
description:
"Amplify enables developers to develop and deploy cloud-powered mobile and web apps. Amplify Hosting provides a continuous delivery and hosting service for web applications. For more information, see the Amplify Hosting User Guide. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation for client app development. For more information, see the Amplify Framework",
loadSpec: "aws/amplify",
},
{
name: "amplifybackend",
description: "AWS Amplify Admin API",
loadSpec: "aws/amplifybackend",
},
{
name: "amplifyuibuilder",
description:
"The Amplify UI Builder API provides a programmatic interface for creating and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's backend Amazon Web Services resources. You can also use the Amplify Studio visual designer to create UI components and model data for an app. For more information, see Introduction in the Amplify Docs. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation for client app development. For more information, see the Amplify Framework. For more information about deploying an Amplify application to Amazon Web Services, see the Amplify User Guide",
loadSpec: "aws/amplifyuibuilder",
},
{
name: "apigateway",
description:
"Amazon API Gateway Amazon API Gateway helps developers deliver robust, secure, and scalable mobile and web application back ends. API Gateway allows developers to securely connect mobile and web applications to APIs that run on Lambda, Amazon EC2, or other publicly addressable web services that are hosted outside of AWS",
loadSpec: "aws/apigateway",
},
{
name: "apigatewaymanagementapi",
description:
"The Amazon API Gateway Management API allows you to directly manage runtime aspects of your deployed APIs. To use it, you must explicitly set the SDK's endpoint to point to the endpoint of your deployed API. The endpoint will be of the form https://{api-id}.execute-api.{region}.amazonaws.com/{stage}, or will be the endpoint corresponding to your API's custom domain and base path, if applicable",
loadSpec: "aws/apigatewaymanagementapi",
},
{
name: "apigatewayv2",
description: "Amazon API Gateway V2",
loadSpec: "aws/apigatewayv2",
},
{
name: "appconfig",
description:
"AppConfig feature flags and dynamic configurations help software builders quickly and securely adjust application behavior in production environments without full code deployments. AppConfig speeds up software release frequency, improves application resiliency, and helps you address emergent issues more quickly. With feature flags, you can gradually release new capabilities to users and measure the impact of those changes before fully deploying the new capabilities to all users. With operational flags and dynamic configurations, you can update block lists, allow lists, throttling limits, logging verbosity, and perform other operational tuning to quickly respond to issues in production environments. AppConfig is a capability of Amazon Web Services Systems Manager. Despite the fact that application configuration content can vary greatly from application to application, AppConfig supports the following use cases, which cover a broad spectrum of customer needs: Feature flags and toggles - Safely release new capabilities to your customers in a controlled environment. Instantly roll back changes if you experience a problem. Application tuning - Carefully introduce application changes while testing the impact of those changes with users in production environments. Allow list or block list - Control access to premium features or instantly block specific users without deploying new code. Centralized configuration storage - Keep your configuration data organized and consistent across all of your workloads. You can use AppConfig to deploy configuration data stored in the AppConfig hosted configuration store, Secrets Manager, Systems Manager, Parameter Store, or Amazon S3. How AppConfig works This section provides a high-level description of how AppConfig works and how you get started. 1. Identify configuration values in code you want to manage in the cloud Before you start creating AppConfig artifacts, we recommend you identify configuration data in your code that you want to dynamically manage using AppConfig. Good examples include feature flags or toggles, allow and block lists, logging verbosity, service limits, and throttling rules, to name a few. If your configuration data already exists in the cloud, you can take advantage of AppConfig validation, deployment, and extension features to further streamline configuration data management. 2. Create an application namespace To create a namespace, you create an AppConfig artifact called an application. An application is simply an organizational construct like a folder. 3. Create environments For each AppConfig application, you define one or more environments. An environment is a logical grouping of targets, such as applications in a Beta or Production environment, Lambda functions, or containers. You can also define environments for application subcomponents, such as the Web, Mobile, and Back-end. You can configure Amazon CloudWatch alarms for each environment. The system monitors alarms during a configuration deployment. If an alarm is triggered, the system rolls back the configuration. 4. Create a configuration profile A configuration profile includes, among other things, a URI that enables AppConfig to locate your configuration data in its stored location and a profile type. AppConfig supports two configuration profile types: feature flags and freeform configurations. Feature flag configuration profiles store their data in the AppConfig hosted configuration store and the URI is simply hosted. For freeform configuration profiles, you can store your data in the AppConfig hosted configuration store or any Amazon Web Services service that integrates with AppConfig, as described in Creating a free form configuration profile in the AppConfig User Guide. A configuration profile can also include optional validators to ensure your configuration data is syntactically and semantically correct. AppConfig performs a check using the validators when you start a deployment. If any errors are detected, the deployment rolls back to the previous configuration data. 5. Deploy configuration data When you create a new deployment, you specify the following: An application ID A configuration profile ID A configuration version An environment ID where you want to deploy the configuration data A deployment strategy ID that defines how fast you want the changes to take effect When you call the StartDeployment API action, AppConfig performs the following tasks: Retrieves the configuration data from the underlying data store by using the location URI in the configuration profile. Verifies the configuration data is syntactically and semantically correct by using the validators you specified when you created your configuration profile. Caches a copy of the data so it is ready to be retrieved by your application. This cached copy is called the deployed data. 6. Retrieve the configuration You can configure AppConfig Agent as a local host and have the agent poll AppConfig for configuration updates. The agent calls the StartConfigurationSession and GetLatestConfiguration API actions and caches your configuration data locally. To retrieve the data, your application makes an HTTP call to the localhost server. AppConfig Agent supports several use cases, as described in Simplified retrieval methods in the AppConfig User Guide. If AppConfig Agent isn't supported for your use case, you can configure your application to poll AppConfig for configuration updates by directly calling the StartConfigurationSession and GetLatestConfiguration API actions. This reference is intended to be used with the AppConfig User Guide",
loadSpec: "aws/appconfig",
},
{
name: "appconfigdata",
description:
"AppConfig Data provides the data plane APIs your application uses to retrieve configuration data. Here's how it works: Your application retrieves configuration data by first establishing a configuration session using the AppConfig Data StartConfigurationSession API action. Your session's client then makes periodic calls to GetLatestConfiguration to check for and retrieve the latest data available. When calling StartConfigurationSession, your code sends the following information: Identifiers (ID or name) of an AppConfig application, environment, and configuration profile that the session tracks. (Optional) The minimum amount of time the session's client must wait between calls to GetLatestConfiguration. In response, AppConfig provides an InitialConfigurationToken to be given to the session's client and used the first time it calls GetLatestConfiguration for that session. This token should only be used once in your first call to GetLatestConfiguration. You must use the new token in the GetLatestConfiguration response (NextPollConfigurationToken) in each subsequent call to GetLatestConfiguration. When calling GetLatestConfiguration, your client code sends the most recent ConfigurationToken value it has and receives in response: NextPollConfigurationToken: the ConfigurationToken value to use on the next call to GetLatestConfiguration. NextPollIntervalInSeconds: the duration the client should wait before making its next call to GetLatestConfiguration. This duration may vary over the course of the session, so it should be used instead of the value sent on the StartConfigurationSession call. The configuration: the latest data intended for the session. This may be empty if the client already has the latest version of the configuration. The InitialConfigurationToken and NextPollConfigurationToken should only be used once. To support long poll use cases, the tokens are valid for up to 24 hours. If a GetLatestConfiguration call uses an expired token, the system returns BadRequestException. For more information and to view example CLI commands that show how to retrieve a configuration using the AppConfig Data StartConfigurationSession and GetLatestConfiguration API actions, see Retrieving the configuration in the AppConfig User Guide",
loadSpec: "aws/appconfigdata",
},
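// A minimal sketch of the appconfigdata session/poll flow described above,
// assuming the AWS SDK for JavaScript v3 client (@aws-sdk/client-appconfigdata);
// the application, environment, and profile identifiers are hypothetical.
// Kept in comment form so this spec file stays side-effect free:
//
//   import {
//     AppConfigDataClient,
//     GetLatestConfigurationCommand,
//     StartConfigurationSessionCommand,
//   } from "@aws-sdk/client-appconfigdata";
//
//   async function fetchConfigOnce(): Promise<void> {
//     const client = new AppConfigDataClient({});
//     // StartConfigurationSession yields the one-time InitialConfigurationToken.
//     const session = await client.send(
//       new StartConfigurationSessionCommand({
//         ApplicationIdentifier: "my-app", // hypothetical
//         EnvironmentIdentifier: "prod", // hypothetical
//         ConfigurationProfileIdentifier: "flags", // hypothetical
//       })
//     );
//     let token = session.InitialConfigurationToken;
//     // Each poll returns NextPollConfigurationToken to use on the next call;
//     // an empty Configuration payload means the client already has the latest.
//     const latest = await client.send(
//       new GetLatestConfigurationCommand({ ConfigurationToken: token })
//     );
//     token = latest.NextPollConfigurationToken;
//     if (latest.Configuration?.length) {
//       console.log(new TextDecoder().decode(latest.Configuration));
//     }
//   }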
{
name: "appfabric",
description:
"Amazon Web Services AppFabric quickly connects software as a service (SaaS) applications across your organization. This allows IT and security teams to easily manage and secure applications using a standard schema, and employees can complete everyday tasks faster using generative artificial intelligence (AI). You can use these APIs to complete AppFabric tasks, such as setting up audit log ingestions or viewing user access. For more information about AppFabric, including the required permissions to use the service, see the Amazon Web Services AppFabric Administration Guide. For more information about using the Command Line Interface (CLI) to manage your AppFabric resources, see the AppFabric section of the CLI Reference",
loadSpec: "aws/appfabric",
},
{
name: "appflow",
description:
"Welcome to the Amazon AppFlow API reference. This guide is for developers who need detailed information about the Amazon AppFlow API operations, data types, and errors. Amazon AppFlow is a fully managed integration service that enables you to securely transfer data between software as a service (SaaS) applications like Salesforce, Marketo, Slack, and ServiceNow, and Amazon Web Services like Amazon S3 and Amazon Redshift. Use the following links to get started on the Amazon AppFlow API: Actions: An alphabetical list of all Amazon AppFlow API operations. Data types: An alphabetical list of all Amazon AppFlow data types. Common parameters: Parameters that all Query operations can use. Common errors: Client and server errors that all operations can return. If you're new to Amazon AppFlow, we recommend that you review the Amazon AppFlow User Guide. Amazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include applicable OAuth attributes (such as auth-code and redirecturi) with the connector-specific ConnectorProfileProperties when creating a new connector profile using Amazon AppFlow API operations. For example, Salesforce users can refer to the Authorize Apps with OAuth documentation",
loadSpec: "aws/appflow",
},
{
name: "appintegrations",
description:
"Amazon AppIntegrations actions Amazon AppIntegrations data types The Amazon AppIntegrations service enables you to configure and reuse connections to external applications. For information about how you can use external applications with Amazon Connect, see the following topics in the Amazon Connect Administrator Guide: Third-party applications (3p apps) in the agent workspace Use Amazon Q in Connect for generative AI\u2013powered agent assistance in real-time",
loadSpec: "aws/appintegrations",
},
{
name: "application-autoscaling",
description:
"With Application Auto Scaling, you can configure automatic scaling for the following resources: Amazon AppStream 2.0 fleets Amazon Aurora Replicas Amazon Comprehend document classification and entity recognizer endpoints Amazon DynamoDB tables and global secondary indexes throughput capacity Amazon ECS services Amazon ElastiCache for Redis clusters (replication groups) Amazon EMR clusters Amazon Keyspaces (for Apache Cassandra) tables Lambda function provisioned concurrency Amazon Managed Streaming for Apache Kafka broker storage Amazon Neptune clusters Amazon SageMaker endpoint variants Amazon SageMaker inference components Amazon SageMaker serverless endpoint provisioned concurrency Spot Fleets (Amazon EC2) Pool of WorkSpaces Custom resources provided by your own applications or services To learn more about Application Auto Scaling, see the Application Auto Scaling User Guide. API Summary The Application Auto Scaling service API includes three key sets of actions: Register and manage scalable targets - Register Amazon Web Services or custom resources as scalable targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and retrieve information on existing scalable targets. Configure and manage automatic scaling - Define scaling policies to dynamically scale your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve your recent scaling activity history. Suspend and resume scaling - Temporarily suspend and later resume automatic scaling by calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can suspend and resume (individually or in combination) scale-out activities that are triggered by a scaling policy, scale-in activities that are triggered by a scaling policy, and scheduled scaling",
loadSpec: "aws/application-autoscaling",
},
{
name: "application-insights",
description:
"Amazon CloudWatch Application Insights Amazon CloudWatch Application Insights is a service that helps you detect common problems with your applications. It enables you to pinpoint the source of issues in your applications (built with technologies such as Microsoft IIS, .NET, and Microsoft SQL Server), by providing key insights into detected problems. After you onboard your application, CloudWatch Application Insights identifies, recommends, and sets up metrics and logs. It continuously analyzes and correlates your metrics and logs for unusual behavior to surface actionable problems with your application. For example, if your application is slow and unresponsive and leading to HTTP 500 errors in your Application Load Balancer (ALB), Application Insights informs you that a memory pressure problem with your SQL Server database is occurring. It bases this analysis on impactful metrics and log errors",
loadSpec: "aws/application-insights",
},
{
name: "application-signals",
description:
"Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. Application Signals works with CloudWatch RUM, CloudWatch Synthetics canaries, and Amazon Web Services Service Catalog AppRegistry, to display your client pages, Synthetics canaries, and application names within dashboards and maps",
loadSpec: "aws/application-signals",
},
{
name: "applicationcostprofiler",
description:
"This reference provides descriptions of the AWS Application Cost Profiler API. The AWS Application Cost Profiler API provides programmatic access to view, create, update, and delete application cost report definitions, as well as to import your usage data into the Application Cost Profiler service. For more information about using this service, see the AWS Application Cost Profiler User Guide",
loadSpec: "aws/applicationcostprofiler",
},
{
name: "appmesh",
description:
"App Mesh is a service mesh based on the Envoy proxy that makes it easy to monitor and control microservices. App Mesh standardizes how your microservices communicate, giving you end-to-end visibility and helping to ensure high availability for your applications. App Mesh gives you consistent visibility and network traffic controls for every microservice in an application. You can use App Mesh with Amazon Web Services Fargate, Amazon ECS, Amazon EKS, Kubernetes on Amazon Web Services, and Amazon EC2. App Mesh supports microservice applications that use service discovery naming for their components. For more information about service discovery on Amazon ECS, see Service Discovery in the Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns and coredns are supported. For more information, see DNS for Services and Pods in the Kubernetes documentation",
loadSpec: "aws/appmesh",
},
{
name: "apprunner",
description:
"App Runner App Runner is an application service that provides a fast, simple, and cost-effective way to go directly from an existing container image or source code to a running service in the Amazon Web Services Cloud in seconds. You don't need to learn new technologies, decide which compute service to use, or understand how to provision and configure Amazon Web Services resources. App Runner connects directly to your container registry or source code repository. It provides an automatic delivery pipeline with fully managed operations, high performance, scalability, and security. For more information about App Runner, see the App Runner Developer Guide. For release information, see the App Runner Release Notes. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that you can use to access the API, see Tools for Amazon Web Services. Endpoints For a list of Region-specific endpoints that App Runner supports, see App Runner endpoints and quotas in the Amazon Web Services General Reference",
loadSpec: "aws/apprunner",
},
{
name: "appstream",
description:
"Amazon AppStream 2.0 This is the Amazon AppStream 2.0 API Reference. This documentation provides descriptions and syntax for each of the actions and data types in AppStream 2.0. AppStream 2.0 is a fully managed, secure application streaming service that lets you stream desktop applications to users without rewriting applications. AppStream 2.0 manages the AWS resources that are required to host and run your applications, scales automatically, and provides access to your users on demand. You can call the AppStream 2.0 API operations by using an interface VPC endpoint (interface endpoint). For more information, see Access AppStream 2.0 API Operations and CLI Commands Through an Interface VPC Endpoint in the Amazon AppStream 2.0 Administration Guide. To learn more about AppStream 2.0, see the following resources: Amazon AppStream 2.0 product page Amazon AppStream 2.0 documentation",
loadSpec: "aws/appstream",
},
{
name: "appsync",
description:
"AppSync provides API actions for creating and interacting with data sources using GraphQL from your application",
loadSpec: "aws/appsync",
},
{
name: "apptest",
description:
"AWS Mainframe Modernization Application Testing provides tools and resources for automated functional equivalence testing for your migration projects",
loadSpec: "aws/apptest",
},
{
name: "arc-zonal-shift",
description:
"Welcome to the API Reference Guide for zonal shift and zonal autoshift in Amazon Route 53 Application Recovery Controller (Route 53 ARC). You can start a zonal shift to move traffic for a load balancer resource away from an Availability Zone to help your application recover quickly from an impairment in an Availability Zone. For example, you can recover your application from a developer's bad code deployment or from an Amazon Web Services infrastructure failure in a single Availability Zone. You can also configure zonal autoshift for supported load balancer resources. Zonal autoshift is a capability in Route 53 ARC where you authorize Amazon Web Services to shift away application resource traffic from an Availability Zone during events, on your behalf, to help reduce your time to recovery. Amazon Web Services starts an autoshift when internal telemetry indicates that there is an Availability Zone impairment that could potentially impact customers. To help make sure that zonal autoshift is safe for your application, you must also configure practice runs when you enable zonal autoshift for a resource. Practice runs start weekly zonal shifts for a resource, to shift traffic for the resource away from an Availability Zone. Practice runs help you to make sure, on a regular basis, that you have enough capacity in all the Availability Zones in an Amazon Web Services Region for your application to continue to operate normally when traffic for a resource is shifted away from one Availability Zone. Before you configure practice runs or enable zonal autoshift, we strongly recommend that you prescale your application resource capacity in all Availability Zones in the Region where your application resources are deployed. You should not rely on scaling on demand when an autoshift or practice run starts. Zonal autoshift, including practice runs, works independently, and does not wait for auto scaling actions to complete. Relying on auto scaling, instead of pre-scaling, can result in loss of availability. If you use auto scaling to handle regular cycles of traffic, we strongly recommend that you configure the minimum capacity of your auto scaling to continue operating normally with the loss of an Availability Zone. Be aware that Route 53 ARC does not inspect the health of individual resources. Amazon Web Services only starts an autoshift when Amazon Web Services telemetry detects that there is an Availability Zone impairment that could potentially impact customers. In some cases, resources might be shifted away that are not experiencing impact. For more information about using zonal shift and zonal autoshift, see the Amazon Route 53 Application Recovery Controller Developer Guide",
loadSpec: "aws/arc-zonal-shift",
},
{
name: "artifact",
description:
"This reference provides descriptions of the low-level AWS Artifact Service API",
loadSpec: "aws/artifact",
},
{
name: "athena",
description:
"Amazon Athena is an interactive query service that lets you use standard SQL to analyze data directly in Amazon S3. You can point Athena at your data in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is serverless, so there is no infrastructure to set up or manage. You pay only for the queries you run. Athena scales automatically\u2014executing queries in parallel\u2014so results are fast, even with large datasets and complex queries. For more information, see What is Amazon Athena in the Amazon Athena User Guide. If you connect to Athena using the JDBC driver, use version 1.1.0 of the driver or later with the Amazon Athena API. Earlier version drivers do not support the API. For more information and to download the driver, see Accessing Amazon Athena with JDBC",
loadSpec: "aws/athena",
},
{
name: "auditmanager",
description:
"Welcome to the Audit Manager API reference. This guide is for developers who need detailed information about the Audit Manager API operations, data types, and errors. Audit Manager is a service that provides automated evidence collection so that you can continually audit your Amazon Web Services usage. You can use it to assess the effectiveness of your controls, manage risk, and simplify compliance. Audit Manager provides prebuilt frameworks that structure and automate assessments for a given compliance standard. Frameworks include a prebuilt collection of controls with descriptions and testing procedures. These controls are grouped according to the requirements of the specified compliance standard or regulation. You can also customize frameworks and controls to support internal audits with specific requirements. Use the following links to get started with the Audit Manager API: Actions: An alphabetical list of all Audit Manager API operations. Data types: An alphabetical list of all Audit Manager data types. Common parameters: Parameters that all operations can use. Common errors: Client and server errors that all operations can return. If you're new to Audit Manager, we recommend that you review the Audit Manager User Guide",
loadSpec: "aws/auditmanager",
},
{
name: "autoscaling",
description:
"Amazon EC2 Auto Scaling Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks. For more information, see the Amazon EC2 Auto Scaling User Guide and the Amazon EC2 Auto Scaling API Reference",
loadSpec: "aws/autoscaling",
},
{
name: "autoscaling-plans",
description:
"AWS Auto Scaling Use AWS Auto Scaling to create scaling plans for your applications to automatically scale your scalable AWS resources. API Summary You can use the AWS Auto Scaling service API to accomplish the following tasks: Create and manage scaling plans Define target tracking scaling policies to dynamically scale your resources based on utilization Scale Amazon EC2 Auto Scaling groups using predictive scaling and dynamic scaling to scale your Amazon EC2 capacity faster Set minimum and maximum capacity limits Retrieve information on existing scaling plans Access current forecast data and historical forecast data for up to 56 days previous To learn more about AWS Auto Scaling, including information about granting IAM users required permissions for AWS Auto Scaling actions, see the AWS Auto Scaling User Guide",
loadSpec: "aws/autoscaling-plans",
},
{
name: "b2bi",
description:
"This is the Amazon Web Services B2B Data Interchange API Reference. It provides descriptions, API request parameters, and the XML response for each of the B2BI API actions. B2BI enables automated exchange of EDI (electronic data interchange) based business-critical transactions at cloud scale, with elasticity and pay-as-you-go pricing. Businesses use EDI documents to exchange transactional data with trading partners, such as suppliers and end customers, using standardized formats such as X12. Rather than actually running a command, you can use the --generate-cli-skeleton parameter with any API call to generate and display a parameter template. You can then use the generated template to customize and use as input on a later command. For details, see Generate and use a parameter skeleton file",
loadSpec: "aws/b2bi",
},
{
name: "backup",
description:
"Backup Backup is a unified backup service designed to protect Amazon Web Services services and their associated data. Backup simplifies the creation, migration, restoration, and deletion of backups, while also providing reporting and auditing",
loadSpec: "aws/backup",
},
{
name: "backup-gateway",
description:
"Backup gateway Backup gateway connects Backup to your hypervisor, so you can create, store, and restore backups of your virtual machines (VMs) anywhere, whether on-premises or in the VMware Cloud (VMC) on Amazon Web Services. Add on-premises resources by connecting to a hypervisor through a gateway. Backup will automatically discover the resources in your hypervisor. Use Backup to assign virtual or on-premises resources to a backup plan, or run on-demand backups. Once you have backed up your resources, you can view them and restore them like any resource supported by Backup. To download the Amazon Web Services software to get started, navigate to the Backup console, choose Gateways, then choose Create gateway",
loadSpec: "aws/backup-gateway",
},
{
name: "batch",
description:
"Batch Using Batch, you can run batch computing workloads on the Amazon Web Services Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of the batch computing to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. You can use Batch to efficiently provision resources, and work toward eliminating capacity constraints, reducing your overall compute costs, and delivering results more quickly. As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus on analyzing results and solving your specific problems instead",
loadSpec: "aws/batch",
},
{
name: "bcm-data-exports",
description:
"You can use the Data Exports API to create customized exports from multiple Amazon Web Services cost management and billing datasets, such as cost and usage data and cost optimization recommendations. The Data Exports API provides the following endpoint: https://bcm-data-exports.us-east-1.api.aws",
loadSpec: "aws/bcm-data-exports",
},
{
name: "bcm-pricing-calculator",
description:
"You can use the Pricing Calculator API to programmatically create estimates for your planned cloud use. You can model usage and commitments such as Savings Plans and Reserved Instances, and generate estimated costs using your discounts and benefit sharing preferences. The Pricing Calculator API provides the following endpoint: https://bcm-pricing-calculator.us-east-1.api.aws",
loadSpec: "aws/bcm-pricing-calculator",
},
{
name: "bedrock",
description:
"Describes the API operations for creating, managing, fine-tuning, and evaluating Amazon Bedrock models",
loadSpec: "aws/bedrock",
},
{
name: "bedrock-agent",
description:
"Describes the API operations for creating and managing Amazon Bedrock agents",
loadSpec: "aws/bedrock-agent",
},
{
name: "bedrock-agent-runtime",
description:
"Contains APIs related to model invocation and querying of knowledge bases",
loadSpec: "aws/bedrock-agent-runtime",
},
{
name: "bedrock-data-automation",
description: "Amazon Bedrock Keystone Build",
loadSpec: "aws/bedrock-data-automation",
},
{
name: "bedrock-data-automation-runtime",
description: "Amazon Bedrock Keystone Runtime",
loadSpec: "aws/bedrock-data-automation-runtime",
},
{
name: "bedrock-runtime",
description:
"Describes the API operations for running inference using Amazon Bedrock models",
loadSpec: "aws/bedrock-runtime",
},
{
name: "billing",
description:
"You can use the Billing API to programmatically list the billing views available to you for a given time period. A billing view represents a set of billing data. The Billing API provides the following endpoint: https://billing.us-east-1.api.aws",
loadSpec: "aws/billing",
},
{
name: "billingconductor",
description:
"Amazon Web Services Billing Conductor is a fully managed service that you can use to customize a proforma version of your billing data each month, to accurately show or chargeback your end customers. Amazon Web Services Billing Conductor doesn't change the way you're billed by Amazon Web Services each month by design. Instead, it provides you with a mechanism to configure, generate, and display rates to certain customers over a given billing period. You can also analyze the difference between the rates you apply to your accounting groupings relative to your actual rates from Amazon Web Services. As a result of your Amazon Web Services Billing Conductor configuration, the payer account can also see the custom rate applied on the billing details page of the Amazon Web Services Billing console, or configure a cost and usage report per billing group. This documentation shows how you can configure Amazon Web Services Billing Conductor using its API. For more information about using the Amazon Web Services Billing Conductor user interface, see the Amazon Web Services Billing Conductor User Guide",
loadSpec: "aws/billingconductor",
},
{
name: "braket",
description:
"The Amazon Braket API Reference provides information about the operations and structures supported in Amazon Braket. Additional Resources: Amazon Braket Developer Guide",
loadSpec: "aws/braket",
},
{
name: "budgets",
description:
"Use the Amazon Web Services Budgets API to plan your service usage, service costs, and instance reservations. This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for the Amazon Web Services Budgets feature. Budgets provide you with a way to see the following information: How close your plan is to your budgeted amount or to the free tier limits Your usage-to-date, including how much you've used of your Reserved Instances (RIs) Your current estimated charges from Amazon Web Services, and how much your predicted usage will accrue in charges by the end of the month How much of your budget has been used Amazon Web Services updates your budget status several times a day. Budgets track your unblended costs, subscriptions, refunds, and RIs. You can create the following types of budgets: Cost budgets - Plan how much you want to spend on a service. Usage budgets - Plan how much you want to use one or more services. RI utilization budgets - Define a utilization threshold, and receive alerts when your RI usage falls below that threshold. This lets you see if your RIs are unused or under-utilized. RI coverage budgets - Define a coverage threshold, and receive alerts when the number of your instance hours that are covered by RIs fall below that threshold. This lets you see how much of your instance usage is covered by a reservation. Service Endpoint The Amazon Web Services Budgets API provides the following endpoint: https://budgets.amazonaws.com For information about costs that are associated with the Amazon Web Services Budgets API, see Amazon Web Services Cost Management Pricing",
loadSpec: "aws/budgets",
},
{
name: "ce",
description:
"You can use the Cost Explorer API to programmatically query your cost and usage data. You can query for aggregated data such as total monthly costs or total daily usage. You can also query for granular data. This might include the number of daily write operations for Amazon DynamoDB database tables in your production environment. Service Endpoint The Cost Explorer API provides the following endpoint: https://ce.us-east-1.amazonaws.com For information about the costs that are associated with the Cost Explorer API, see Amazon Web Services Cost Management Pricing",
loadSpec: "aws/ce",
},
{
name: "chatbot",
description:
"The AWS Chatbot API Reference provides descriptions, API request parameters, and the XML response for each of the AWS Chatbot API actions. AWS Chatbot APIs are currently available in the following Regions: US East (Ohio) - us-east-2 US West (Oregon) - us-west-2 Asia Pacific (Singapore) - ap-southeast-1 Europe (Ireland) - eu-west-1 The AWS Chatbot console can only be used in US East (Ohio). Your configuration data however, is stored in each of the relevant available Regions. Your AWS CloudTrail events are logged in whatever Region you call from, not US East (N. Virginia) by default",
loadSpec: "aws/chatbot",
},
{
name: "chime",
description:
"Most of these APIs are no longer supported and will not be updated. We recommend using the latest versions in the Amazon Chime SDK API reference, in the Amazon Chime SDK. Using the latest versions requires migrating to dedicated namespaces. For more information, refer to Migrating from the Amazon Chime namespace in the Amazon Chime SDK Developer Guide. The Amazon Chime application programming interface (API) is designed so administrators can perform key tasks, such as creating and managing Amazon Chime accounts, users, and Voice Connectors. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes. You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls for Amazon Chime. We recommend using an AWS SDK or the AWS CLI. The page for each API action contains a See Also section that includes links to information about using the action with a language-specific AWS SDK or the AWS CLI. Using an AWS SDK You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center. Using the AWS CLI Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference. Using REST APIs If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports Signature Version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference. When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com. Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Identity and Access Management for Amazon Chime in the Amazon Chime Administration Guide",
loadSpec: "aws/chime",
},
{
name: "chime-sdk-identity",
description:
"The Amazon Chime SDK Identity APIs in this section allow software developers to create and manage unique instances of their messaging applications. These APIs provide the overarching framework for creating and sending messages. For more information about the identity APIs, refer to Amazon Chime SDK identity",
loadSpec: "aws/chime-sdk-identity",
},
{
name: "chime-sdk-media-pipelines",
description:
"The Amazon Chime SDK media pipeline APIs in this section allow software developers to create Amazon Chime SDK media pipelines that capture, concatenate, or stream your Amazon Chime SDK meetings. For more information about media pipelines, see Amazon Chime SDK media pipelines",
loadSpec: "aws/chime-sdk-media-pipelines",
},
{
name: "chime-sdk-meetings",
description:
"The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the Amazon Web Services Regions for meetings, create and manage users, and send and receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings",
loadSpec: "aws/chime-sdk-meetings",
},
{
name: "chime-sdk-messaging",
description:
"The Amazon Chime SDK messaging APIs in this section allow software developers to send and receive messages in custom messaging applications. These APIs depend on the frameworks provided by the Amazon Chime SDK identity APIs. For more information about the messaging APIs, see Amazon Chime SDK messaging",
loadSpec: "aws/chime-sdk-messaging",
},
{
name: "chime-sdk-voice",
description:
"The Amazon Chime SDK telephony APIs in this section enable developers to create PSTN calling solutions that use Amazon Chime SDK Voice Connectors, and Amazon Chime SDK SIP media applications. Developers can also order and manage phone numbers, create and manage Voice Connectors and SIP media applications, and run voice analytics",
loadSpec: "aws/chime-sdk-voice",
},
{
name: "cleanrooms",
description:
"Welcome to the Clean Rooms API Reference. Clean Rooms is an Amazon Web Services service that helps multiple parties to join their data together in a secure collaboration workspace. In the collaboration, members who can query and receive results can get insights into the collective datasets without either party getting access to the other party's raw data. To learn more about Clean Rooms concepts, procedures, and best practices, see the Clean Rooms User Guide. To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the Clean Rooms SQL Reference",
loadSpec: "aws/cleanrooms",
},
{
name: "cleanroomsml",
description:
"Welcome to the Amazon Web Services Clean Rooms ML API Reference. Amazon Web Services Clean Rooms ML provides a privacy-enhancing method for two parties to identify similar users in their data without the need to share their data with each other. The first party brings the training data to Clean Rooms so that they can create and configure an audience model (lookalike model) and associate it with a collaboration. The second party then brings their seed data to Clean Rooms and generates an audience (lookalike segment) that resembles the training data. To learn more about Amazon Web Services Clean Rooms ML concepts, procedures, and best practices, see the Clean Rooms User Guide. To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the Clean Rooms SQL Reference",
loadSpec: "aws/cleanroomsml",
},
{
name: "cloud9",
description:
"Cloud9 Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud. For more information about Cloud9, see the Cloud9 User Guide. Cloud9 supports these operations: CreateEnvironmentEC2: Creates an Cloud9 development environment, launches an Amazon EC2 instance, and then connects from the instance to the environment. CreateEnvironmentMembership: Adds an environment member to an environment. DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance. DeleteEnvironmentMembership: Deletes an environment member from an environment. DescribeEnvironmentMemberships: Gets information about environment members for an environment. DescribeEnvironments: Gets information about environments. DescribeEnvironmentStatus: Gets status information for an environment. ListEnvironments: Gets a list of environment identifiers. ListTagsForResource: Gets the tags for an environment. TagResource: Adds tags to an environment. UntagResource: Removes tags from an environment. UpdateEnvironment: Changes the settings of an existing environment. UpdateEnvironmentMembership: Changes the settings of an existing environment member for an environment",
loadSpec: "aws/cloud9",
},
{
name: "cloudcontrol",
description:
"For more information about Amazon Web Services Cloud Control API, see the Amazon Web Services Cloud Control API User Guide",
loadSpec: "aws/cloudcontrol",
},
{
name: "clouddirectory",
description:
"Amazon Cloud Directory Amazon Cloud Directory is a component of the AWS Directory Service that simplifies the development and management of cloud-scale web, mobile, and IoT applications. This guide describes the Cloud Directory operations that you can call programmatically and includes detailed information on data types and errors. For information about Cloud Directory features, see AWS Directory Service and the Amazon Cloud Directory Developer Guide",
loadSpec: "aws/clouddirectory",
},
{
name: "cloudformation",
description:
"CloudFormation CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Amazon EC2 Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure. With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about CloudFormation, see the CloudFormation product page. CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com",
loadSpec: "aws/cloudformation",
},
{
name: "cloudfront",
description:
"Amazon CloudFront This is the Amazon CloudFront API Reference. This guide is for developers who need detailed information about CloudFront API actions, data types, and errors. For detailed information about CloudFront features, see the Amazon CloudFront Developer Guide",
loadSpec: "aws/cloudfront",
},
{
name: "cloudfront-keyvaluestore",
description:
"Amazon CloudFront KeyValueStore Service to View and Update Data in a KVS Resource",
loadSpec: "aws/cloudfront-keyvaluestore",
},
{
name: "cloudhsm",
description:
"AWS CloudHSM Service This is documentation for AWS CloudHSM Classic. For more information, see AWS CloudHSM Classic FAQs, the AWS CloudHSM Classic User Guide, and the AWS CloudHSM Classic API Reference. For information about the current version of AWS CloudHSM, see AWS CloudHSM, the AWS CloudHSM User Guide, and the AWS CloudHSM API Reference",
loadSpec: "aws/cloudhsm",
},
{
name: "cloudhsmv2",
description:
"For more information about CloudHSM, see CloudHSM and the CloudHSM User Guide",
loadSpec: "aws/cloudhsmv2",
},
{
name: "cloudsearch",
description:
"Amazon CloudSearch Configuration Service You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action. The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints",
loadSpec: "aws/cloudsearch",
},
{
name: "cloudsearchdomain",
description:
"You use the AmazonCloudSearch2013 API to upload documents to a search domain and search those documents. The endpoints for submitting UploadDocuments, Search, and Suggest requests are domain-specific. To get the endpoints for your domain, use the Amazon CloudSearch configuration service DescribeDomains action. The domain endpoints are also displayed on the domain dashboard in the Amazon CloudSearch console. You submit suggest requests to the search endpoint. For more information, see the Amazon CloudSearch Developer Guide",
loadSpec: "aws/cloudsearchdomain",
},
{
name: "cloudtrail",
description:
"CloudTrail This is the CloudTrail API Reference. It provides descriptions of actions, data types, common parameters, and common errors for CloudTrail. CloudTrail is a web service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. The recorded information includes the identity of the user, the start time of the Amazon Web Services API call, the source IP address, the request parameters, and the response elements returned by the service. As an alternative to the API, you can use one of the Amazon Web Services SDKs, which consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide programmatic access to CloudTrail. For example, the SDKs handle cryptographically signing requests, managing errors, and retrying requests automatically. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools to Build on Amazon Web Services. See the CloudTrail User Guide for information about the data that is included with each Amazon Web Services API call listed in the log files",
loadSpec: "aws/cloudtrail",
},
{
name: "cloudtrail-data",
description:
"The CloudTrail Data Service lets you ingest events into CloudTrail from any source in your hybrid environments, such as in-house or SaaS applications hosted on-premises or in the cloud, virtual machines, or containers. You can store, access, analyze, troubleshoot and take action on this data without maintaining multiple log aggregators and reporting tools. After you run PutAuditEvents to ingest your application activity into CloudTrail, you can use CloudTrail Lake to search, query, and analyze the data that is logged from your applications",
loadSpec: "aws/cloudtrail-data",
},
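// A hedged sketch of the PutAuditEvents ingestion flow described above, using
// the AWS SDK for JavaScript v3 (assumes @aws-sdk/client-cloudtrail-data is
// installed; the channel ARN and the event payload are placeholders):
//
//   import { CloudTrailDataClient, PutAuditEventsCommand } from "@aws-sdk/client-cloudtrail-data";
//   const ctd = new CloudTrailDataClient({ region: "us-east-1" });
//   await ctd.send(
//     new PutAuditEventsCommand({
//       channelArn: "arn:aws:cloudtrail:us-east-1:111122223333:channel/EXAMPLE",
//       auditEvents: [{ id: "event-1", eventData: JSON.stringify({ action: "login", user: "jane" }) }],
//     }),
//   );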
{
name: "cloudwatch",
description:
"Amazon CloudWatch monitors your Amazon Web Services (Amazon Web Services) resources and the applications you run on Amazon Web Services in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications. CloudWatch alarms send notifications or automatically change the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money. In addition to monitoring the built-in metrics that come with Amazon Web Services, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health",
loadSpec: "aws/cloudwatch",
},
{
name: "codeartifact",
description:
"CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client. CodeArtifact concepts Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools such as the npm CLI or the Maven CLI ( mvn ). For a list of supported package managers, see the CodeArtifact User Guide. Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in Key Management Service (KMS). Each repository is a member of a single domain and can't be moved to a different domain. The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages. Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization. Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, NuGet, Swift, Ruby, Cargo, and generic package formats. For more information about the supported package formats and how to use CodeArtifact with them, see the CodeArtifact User Guide. In CodeArtifact, a package consists of: A name (for example, webpack is the name of a popular npm package) An optional namespace (for example, @types in @types/node) A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.) Package-level metadata (for example, npm tags) Package group: A group of packages that match a specified definition. Package groups can be used to apply configuration to multiple packages that match a defined pattern using package format, package namespace, and package name. You can use package groups to more conveniently configure package origin controls for multiple packages. Package origin controls are used to block or allow ingestion or publishing of new package versions, which protects users from malicious actions known as dependency substitution attacks. Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets. 
Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories. Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files. CodeArtifact supported API operations AssociateExternalConnection: Adds an existing external connection to a repository. CopyPackageVersions: Copies package versions from one repository to another repository in the same domain. CreateDomain: Creates a domain. CreatePackageGroup: Creates a package group. CreateRepository: Creates a CodeArtifact repository in a domain. DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories. DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain. DeletePackage: Deletes a package and all associated package versions. DeletePackageGroup: Deletes a package group. Does not delete packages or package versions that are associated with a package group. DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage. DeleteRepository: Deletes a repository. DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository. DescribeDomain: Returns a DomainDescription object that contains information about the requested domain. DescribePackage: Returns a PackageDescription object that contains details about a package. DescribePackageGroup: Returns a PackageGroup object that contains details about a package group. DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version. DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository. DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because its assets have been permanently removed from storage. DisassociateExternalConnection: Removes an existing external connection from a repository. GetAssociatedPackageGroup: Returns the most closely associated package group to the specified package. GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires after the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours. GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain. GetPackageVersionAsset: Returns the contents of an asset that is in a package version. GetPackageVersionReadme: Gets the readme file or descriptive text for a package version. GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format: cargo generic maven npm nuget pypi ruby swift GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository. ListAllowedRepositoriesForGroup: Lists the allowed repositories for a package group that has origin configuration set to ALLOW_SPECIFIC_REPOSITORIES. ListAssociatedPackages: Returns a list of packages associated with the requested package group. 
ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain. ListPackages: Lists the packages in a repository. ListPackageGroups: Returns a list of package groups in the requested domain. ListPackageVersionAssets: Lists the assets for a given package version. ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version. ListPackageVersions: Returns a list of package versions for a specified package in a repository. ListRepositories: Returns a list of repositories owned by the Amazon Web Services account that called this method. ListRepositoriesInDomain: Returns a list of the repositories in a domain. ListSubPackageGroups: Returns a list of direct children of the specified package group. PublishPackageVersion: Creates a new package version containing one or more assets. PutDomainPermissionsPolicy: Attaches a resource policy to a domain. PutPackageOriginConfiguration: Sets the package origin configuration for a package, which determines how new versions of the package can be added to a specific repository. PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it. UpdatePackageGroup: Updates a package group. This API cannot be used to update a package group's origin configuration or pattern. UpdatePackageGroupOriginConfiguration: Updates the package origin configuration for a package group. UpdatePackageVersionsStatus: Updates the status of one or more versions of a package. UpdateRepository: Updates the properties of a repository",
loadSpec: "aws/codeartifact",
},
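// A hedged sketch of the GetAuthorizationToken operation described above, using
// the AWS SDK for JavaScript v3 (assumes @aws-sdk/client-codeartifact is
// installed; "my-domain" is a placeholder, and 43200 seconds corresponds to the
// 12-hour maximum authorization period noted in the description):
//
//   import { CodeartifactClient, GetAuthorizationTokenCommand } from "@aws-sdk/client-codeartifact";
//   const ca = new CodeartifactClient({ region: "us-east-1" });
//   const { authorizationToken } = await ca.send(
//     new GetAuthorizationTokenCommand({ domain: "my-domain", durationSeconds: 43200 }),
//   );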
{
name: "codebuild",
description:
"CodeBuild CodeBuild is a fully managed build service in the cloud. CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in CodeBuild to use your own build tools. CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about CodeBuild, see the CodeBuild User Guide",
loadSpec: "aws/codebuild",
},
{
name: "codecatalyst",
description:
"Welcome to the Amazon CodeCatalyst API reference. This reference provides descriptions of operations and data types for Amazon CodeCatalyst. You can use the Amazon CodeCatalyst API to work with the following objects. Spaces, by calling the following: DeleteSpace, which deletes a space. GetSpace, which returns information about a space. GetSubscription, which returns information about the Amazon Web Services account used for billing purposes and the billing plan for the space. ListSpaces, which retrieves a list of spaces. UpdateSpace, which changes one or more values for a space. Projects, by calling the following: CreateProject which creates a project in a specified space. GetProject, which returns information about a project. ListProjects, which retrieves a list of projects in a space. Users, by calling the following: GetUserDetails, which returns information about a user in Amazon CodeCatalyst. Source repositories, by calling the following: CreateSourceRepository, which creates an empty Git-based source repository in a specified project. CreateSourceRepositoryBranch, which creates a branch in a specified repository where you can work on code. DeleteSourceRepository, which deletes a source repository. GetSourceRepository, which returns information about a source repository. GetSourceRepositoryCloneUrls, which returns information about the URLs that can be used with a Git client to clone a source repository. ListSourceRepositories, which retrieves a list of source repositories in a project. ListSourceRepositoryBranches, which retrieves a list of branches in a source repository. Dev Environments and the Amazon Web Services Toolkits, by calling the following: CreateDevEnvironment, which creates a Dev Environment, where you can quickly work on the code stored in the source repositories of your project. DeleteDevEnvironment, which deletes a Dev Environment. GetDevEnvironment, which returns information about a Dev Environment. ListDevEnvironments, which retrieves a list of Dev Environments in a project. ListDevEnvironmentSessions, which retrieves a list of active Dev Environment sessions in a project. StartDevEnvironment, which starts a specified Dev Environment and puts it into an active state. StartDevEnvironmentSession, which starts a session to a specified Dev Environment. StopDevEnvironment, which stops a specified Dev Environment and puts it into an stopped state. StopDevEnvironmentSession, which stops a session for a specified Dev Environment. UpdateDevEnvironment, which changes one or more values for a Dev Environment. Workflows, by calling the following: GetWorkflow, which returns information about a workflow. GetWorkflowRun, which returns information about a specified run of a workflow. ListWorkflowRuns, which retrieves a list of runs of a specified workflow. ListWorkflows, which retrieves a list of workflows in a specified project. StartWorkflowRun, which starts a run of a specified workflow. Security, activity, and resource management in Amazon CodeCatalyst, by calling the following: CreateAccessToken, which creates a personal access token (PAT) for the current user. DeleteAccessToken, which deletes a specified personal access token (PAT). ListAccessTokens, which lists all personal access tokens (PATs) associated with a user. ListEventLogs, which retrieves a list of events that occurred during a specified time period in a space. VerifySession, which verifies whether the calling user has a valid Amazon CodeCatalyst login and session. 
If you are using the Amazon CodeCatalyst APIs with an SDK or the CLI, you must configure your computer to work with Amazon CodeCatalyst and single sign-on (SSO). For more information, see Setting up to use the CLI with Amazon CodeCatalyst and the SSO documentation for your SDK",
loadSpec: "aws/codecatalyst",
},
{
name: "codecommit",
description:
"CodeCommit This is the CodeCommit API Reference. This reference provides descriptions of the operations and data types for CodeCommit API along with usage examples. You can use the CodeCommit API to work with the following objects: Repositories, by calling the following: BatchGetRepositories, which returns information about one or more repositories associated with your Amazon Web Services account. CreateRepository, which creates an CodeCommit repository. DeleteRepository, which deletes an CodeCommit repository. GetRepository, which returns information about a specified repository. ListRepositories, which lists all CodeCommit repositories associated with your Amazon Web Services account. UpdateRepositoryDescription, which sets or updates the description of the repository. UpdateRepositoryEncryptionKey, which updates the Key Management Service encryption key used to encrypt and decrypt a repository. UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository can access it until you send them the new HTTPS or SSH URL to use. Branches, by calling the following: CreateBranch, which creates a branch in a specified repository. DeleteBranch, which deletes the specified branch in a repository unless it is the default branch. GetBranch, which returns information about a specified branch. ListBranches, which lists all branches for a specified repository. UpdateDefaultBranch, which changes the default branch for a repository. Files, by calling the following: DeleteFile, which deletes the content of a specified file from a specified branch. GetBlob, which returns the base-64 encoded content of an individual Git blob object in a repository. GetFile, which returns the base-64 encoded content of a specified file. GetFolder, which returns the contents of a specified folder or directory. ListFileCommitHistory, which retrieves a list of commits and changes to a specified file. PutFile, which adds or modifies a single file in a specified repository and branch. Commits, by calling the following: BatchGetCommits, which returns information about one or more commits in a repository. CreateCommit, which creates a commit for changes to a repository. GetCommit, which returns information about a commit, including commit messages and author and committer information. GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference). Merges, by calling the following: BatchDescribeMergeConflicts, which returns information about conflicts in a merge between commits in a repository. CreateUnreferencedMergeCommit, which creates an unreferenced commit between two branches or commits for the purpose of comparing them and identifying any potential conflicts. DescribeMergeConflicts, which returns information about merge conflicts between the base, source, and destination versions of a file in a potential merge. GetMergeCommit, which returns information about the merge between a source and destination commit. GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request. GetMergeOptions, which returns information about the available merge options between two branches or commit specifiers. MergeBranchesByFastForward, which merges two branches using the fast-forward merge option. MergeBranchesBySquash, which merges two branches using the squash merge option. 
MergeBranchesByThreeWay, which merges two branches using the three-way merge option. Pull requests, by calling the following: CreatePullRequest, which creates a pull request in a specified repository. CreatePullRequestApprovalRule, which creates an approval rule for a specified pull request. DeletePullRequestApprovalRule, which deletes an approval rule for a specified pull request. DescribePullRequestEvents, which returns information about one or more pull request events. EvaluatePullRequestApprovalRules, which evaluates whether a pull request has met all the conditions specified in its associated approval rules. GetCommentsForPullRequest, which returns information about comments on a specified pull request. GetPullRequest, which returns information about a specified pull request. GetPullRequestApprovalStates, which returns information about the approval states for a specified pull request. GetPullRequestOverrideState, which returns information about whether approval rules have been set aside (overridden) for a pull request, and if so, the Amazon Resource Name (ARN) of the user or identity that overrode the rules and their requirements for the pull request. ListPullRequests, which lists all pull requests for a repository. MergePullRequestByFastForward, which merges the source branch of a pull request into the specified destination branch for that pull request using the fast-forward merge option. MergePullRequestBySquash, which merges the source branch of a pull request into the specified destination branch for that pull request using the squash merge option. MergePullRequestByThreeWay, which merges the source branch of a pull request into the specified destination branch for that pull request using the three-way merge option. OverridePullRequestApprovalRules, which sets aside all approval rule requirements for a pull request. PostCommentForPullRequest, which posts a comment to a pull request at the specified line, file, or request. UpdatePullRequestApprovalRuleContent, which updates the structure of an approval rule for a pull request. UpdatePullRequestApprovalState, which updates the state of an approval on a pull request. UpdatePullRequestDescription, which updates the description of a pull request. UpdatePullRequestStatus, which updates the status of a pull request. UpdatePullRequestTitle, which updates the title of a pull request. Approval rule templates, by calling the following: AssociateApprovalRuleTemplateWithRepository, which associates a template with a specified repository. After the template is associated with a repository, CodeCommit creates approval rules that match the template conditions on every pull request created in the specified repository. BatchAssociateApprovalRuleTemplateWithRepositories, which associates a template with one or more specified repositories. After the template is associated with a repository, CodeCommit creates approval rules that match the template conditions on every pull request created in the specified repositories. BatchDisassociateApprovalRuleTemplateFromRepositories, which removes the association between a template and specified repositories so that approval rules based on the template are not automatically created when pull requests are created in those repositories. CreateApprovalRuleTemplate, which creates a template for approval rules that can then be associated with one or more repositories in your Amazon Web Services account. DeleteApprovalRuleTemplate, which deletes the specified template. 
It does not remove approval rules on pull requests already created with the template. DisassociateApprovalRuleTemplateFromRepository, which removes the association between a template and a repository so that approval rules based on the template are not automatically created when pull requests are created in the specified repository. GetApprovalRuleTemplate, which returns information about an approval rule template. ListApprovalRuleTemplates, which lists all approval rule templates in the Amazon Web Services Region in your Amazon Web Services account. ListAssociatedApprovalRuleTemplatesForRepository, which lists all approval rule templates that are associated with a specified repository. ListRepositoriesForApprovalRuleTemplate, which lists all repositories associated with the specified approval rule template. UpdateApprovalRuleTemplateDescription, which updates the description of an approval rule template. UpdateApprovalRuleTemplateName, which updates the name of an approval rule template. UpdateApprovalRuleTemplateContent, which updates the content of an approval rule template. Comments in a repository, by calling the following: DeleteCommentContent, which deletes the content of a comment on a commit in a repository. GetComment, which returns information about a comment on a commit. GetCommentReactions, which returns information about emoji reactions to comments. GetCommentsForComparedCommit, which returns information about comments on the comparison between two commit specifiers in a repository. PostCommentForComparedCommit, which creates a comment on the comparison between two commit specifiers in a repository. PostCommentReply, which creates a reply to a comment. PutCommentReaction, which creates or updates an emoji reaction to a comment. UpdateComment, which updates the content of a comment on a commit in a repository. Tags used to tag resources in CodeCommit (not Git tags), by calling the following: ListTagsForResource, which gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in CodeCommit. TagResource, which adds or updates tags for a resource in CodeCommit. UntagResource, which removes tags for a resource in CodeCommit. Triggers, by calling the following: GetRepositoryTriggers, which returns information about triggers configured for a repository. PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers. TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target. For information about how to use CodeCommit, see the CodeCommit User Guide",
loadSpec: "aws/codecommit",
},
{
name: "codeconnections",
description:
"AWS CodeConnections This Amazon Web Services CodeConnections API Reference provides descriptions and usage examples of the operations and data types for the Amazon Web Services CodeConnections API. You can use the connections API to work with connections and installations. Connections are configurations that you use to connect Amazon Web Services resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection. When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one. When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections. You can work with connections by calling: CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline. DeleteConnection, which deletes the specified connection. GetConnection, which returns information about the connection, including the connection status. ListConnections, which lists the connections associated with your account. You can work with hosts by calling: CreateHost, which creates a host that represents the infrastructure where your provider is installed. DeleteHost, which deletes the specified host. GetHost, which returns information about the host, including the setup status. ListHosts, which lists the hosts associated with your account. You can work with tags in Amazon Web Services CodeConnections by calling the following: ListTagsForResource, which gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in Amazon Web Services CodeConnections. TagResource, which adds or updates tags for a resource in Amazon Web Services CodeConnections. UntagResource, which removes tags for a resource in Amazon Web Services CodeConnections. For information about how to use Amazon Web Services CodeConnections, see the Developer Tools User Guide",
loadSpec: "aws/codeconnections",
},
{
name: "codeguru-reviewer",
description:
"This section provides documentation for the Amazon CodeGuru Reviewer API operations. CodeGuru Reviewer is a service that uses program analysis and machine learning to detect potential defects that are difficult for developers to find and recommends fixes in your Java and Python code. By proactively detecting and providing recommendations for addressing code defects and implementing best practices, CodeGuru Reviewer improves the overall quality and maintainability of your code base during the code review stage. For more information about CodeGuru Reviewer, see the Amazon CodeGuru Reviewer User Guide. To improve the security of your CodeGuru Reviewer API calls, you can establish a private connection between your VPC and CodeGuru Reviewer by creating an interface VPC endpoint. For more information, see CodeGuru Reviewer and interface VPC endpoints (Amazon Web Services PrivateLink) in the Amazon CodeGuru Reviewer User Guide",
loadSpec: "aws/codeguru-reviewer",
},
{
name: "codeguru-security",
description:
"Amazon CodeGuru Security is in preview release and is subject to change. This section provides documentation for the Amazon CodeGuru Security API operations. CodeGuru Security is a service that uses program analysis and machine learning to detect security policy violations and vulnerabilities, and recommends ways to address these security risks. By proactively detecting and providing recommendations for addressing security risks, CodeGuru Security improves the overall security of your application code. For more information about CodeGuru Security, see the Amazon CodeGuru Security User Guide",
loadSpec: "aws/codeguru-security",
},
{
name: "codeguruprofiler",
description:
"This section provides documentation for the Amazon CodeGuru Profiler API operations. Amazon CodeGuru Profiler collects runtime performance data from your live applications, and provides recommendations that can help you fine-tune your application performance. Using machine learning algorithms, CodeGuru Profiler can help you find your most expensive lines of code and suggest ways you can improve efficiency and remove CPU bottlenecks. Amazon CodeGuru Profiler provides different visualizations of profiling data to help you identify what code is running on the CPU, see how much time is consumed, and suggest ways to reduce CPU utilization. Amazon CodeGuru Profiler currently supports applications written in all Java virtual machine (JVM) languages and Python. While CodeGuru Profiler supports both visualizations and recommendations for applications written in Java, it can also generate visualizations and a subset of recommendations for applications written in other JVM languages and Python. For more information, see What is Amazon CodeGuru Profiler in the Amazon CodeGuru Profiler User Guide",
loadSpec: "aws/codeguruprofiler",
},
{
name: "codepipeline",
description:
"CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Compute Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. 
Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success",
loadSpec: "aws/codepipeline",
},
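// A minimal sketch of the job-worker sequence described above
// (PollForJobs -> AcknowledgeJob -> PutJobSuccessResult). The AWS SDK for
// JavaScript v3 package @aws-sdk/client-codepipeline and the custom action
// type ("MyProvider") are assumptions for illustration, not part of this spec:
//
//   import {
//     CodePipelineClient,
//     PollForJobsCommand,
//     AcknowledgeJobCommand,
//     PutJobSuccessResultCommand,
//   } from "@aws-sdk/client-codepipeline";
//   const cp = new CodePipelineClient({});
//   const { jobs = [] } = await cp.send(
//     new PollForJobsCommand({
//       actionTypeId: { category: "Build", owner: "Custom", provider: "MyProvider", version: "1" },
//     }),
//   );
//   for (const job of jobs) {
//     await cp.send(new AcknowledgeJobCommand({ jobId: job.id!, nonce: job.nonce! }));
//     // ...perform the work for the job, then report the result:
//     await cp.send(new PutJobSuccessResultCommand({ jobId: job.id! }));
//   }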
{
name: "codestar-connections",
description:
"AWS CodeStar Connections This Amazon Web Services CodeStar Connections API Reference provides descriptions and usage examples of the operations and data types for the Amazon Web Services CodeStar Connections API. You can use the connections API to work with connections and installations. Connections are configurations that you use to connect Amazon Web Services resources to external code repositories. Each connection is a resource that can be given to services such as CodePipeline to connect to a third-party repository such as Bitbucket. For example, you can add the connection in CodePipeline so that it triggers your pipeline when a code change is made to your third-party code repository. Each connection is named and associated with a unique ARN that is used to reference the connection. When you create a connection, the console initiates a third-party connection handshake. Installations are the apps that are used to conduct this handshake. For example, the installation for the Bitbucket provider type is the Bitbucket app. When you create a connection, you can choose an existing installation or create one. When you want to create a connection to an installed provider type such as GitHub Enterprise Server, you create a host for your connections. You can work with connections by calling: CreateConnection, which creates a uniquely named connection that can be referenced by services such as CodePipeline. DeleteConnection, which deletes the specified connection. GetConnection, which returns information about the connection, including the connection status. ListConnections, which lists the connections associated with your account. You can work with hosts by calling: CreateHost, which creates a host that represents the infrastructure where your provider is installed. DeleteHost, which deletes the specified host. GetHost, which returns information about the host, including the setup status. ListHosts, which lists the hosts associated with your account. You can work with tags in Amazon Web Services CodeStar Connections by calling the following: ListTagsForResource, which gets information about Amazon Web Services tags for a specified Amazon Resource Name (ARN) in Amazon Web Services CodeStar Connections. TagResource, which adds or updates tags for a resource in Amazon Web Services CodeStar Connections. UntagResource, which removes tags for a resource in Amazon Web Services CodeStar Connections. For information about how to use Amazon Web Services CodeStar Connections, see the Developer Tools User Guide",
loadSpec: "aws/codestar-connections",
},
{
name: "codestar-notifications",
description:
"This AWS CodeStar Notifications API Reference provides descriptions and usage examples of the operations and data types for the AWS CodeStar Notifications API. You can use the AWS CodeStar Notifications API to work with the following objects: Notification rules, by calling the following: CreateNotificationRule, which creates a notification rule for a resource in your account. DeleteNotificationRule, which deletes a notification rule. DescribeNotificationRule, which provides information about a notification rule. ListNotificationRules, which lists the notification rules associated with your account. UpdateNotificationRule, which changes the name, events, or targets associated with a notification rule. Subscribe, which subscribes a target to a notification rule. Unsubscribe, which removes a target from a notification rule. Targets, by calling the following: DeleteTarget, which removes a notification rule target from a notification rule. ListTargets, which lists the targets associated with a notification rule. Events, by calling the following: ListEventTypes, which lists the event types you can include in a notification rule. Tags, by calling the following: ListTagsForResource, which lists the tags already associated with a notification rule in your account. TagResource, which associates a tag you provide with a notification rule in your account. UntagResource, which removes a tag from a notification rule in your account. For information about how to use AWS CodeStar Notifications, see the Amazon Web Services Developer Tools Console User Guide",
loadSpec: "aws/codestar-notifications",
},
{
name: "cognito-identity",
description:
"Amazon Cognito Federated Identities Amazon Cognito Federated Identities is a web service that delivers scoped temporary credentials to mobile devices and other untrusted environments. It uniquely identifies a device and supplies the user with a consistent identity over the lifetime of an application. Using Amazon Cognito Federated Identities, you can enable authentication with one or more third-party identity providers (Facebook, Google, or Login with Amazon) or an Amazon Cognito user pool, and you can also choose to support unauthenticated access from your app. Cognito delivers a unique identifier for each user and acts as an OpenID token provider trusted by AWS Security Token Service (STS) to access temporary, limited-privilege AWS credentials. For a description of the authentication flow from the Amazon Cognito Developer Guide see Authentication Flow. For more information see Amazon Cognito Federated Identities",
loadSpec: "aws/cognito-identity",
},
{
name: "cognito-idp",
description:
"With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To authenticate users from third-party identity providers (IdPs) in this API, you can link IdP users to native user profiles. Learn more about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference. This API reference provides detailed information about API operations and object types in Amazon Cognito. Along with resource management operations, the Amazon Cognito user pools API includes classes of operations and authorization models for client-side and server-side authentication of users. You can interact with operations in the Amazon Cognito user pools API as any of the following subjects. An administrator who wants to configure user pools, app clients, users, groups, or other user pool functions. A server-side app, like a web application, that wants to use its Amazon Web Services privileges to manage, authenticate, or authorize a user. A client-side app, like a mobile app, that wants to make unauthenticated requests to manage, authenticate, or authorize a user. For more information, see Using the Amazon Cognito user pools API and user pool endpoints in the Amazon Cognito Developer Guide. With your Amazon Web Services SDK, you can build the logic to support operational flows in every use case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started with the CognitoIdentityProvider client in other supported Amazon Web Services SDKs. Amazon Web Services Command Line Interface Amazon Web Services SDK for .NET Amazon Web Services SDK for C++ Amazon Web Services SDK for Go Amazon Web Services SDK for Java V2 Amazon Web Services SDK for JavaScript Amazon Web Services SDK for PHP V3 Amazon Web Services SDK for Python Amazon Web Services SDK for Ruby V3 To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services SDKs",
loadSpec: "aws/cognito-idp",
},
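// A hedged example of the client-side authentication model described above,
// using the SDK v3 CognitoIdentityProvider client mentioned in the entry (the
// app client ID, username, and password are placeholders, and the
// USER_PASSWORD_AUTH flow must be enabled on the app client):
//
//   import { CognitoIdentityProviderClient, InitiateAuthCommand } from "@aws-sdk/client-cognito-identity-provider";
//   const idp = new CognitoIdentityProviderClient({ region: "us-east-1" });
//   const res = await idp.send(
//     new InitiateAuthCommand({
//       ClientId: "app-client-id",
//       AuthFlow: "USER_PASSWORD_AUTH",
//       AuthParameters: { USERNAME: "jane", PASSWORD: "example-password" },
//     }),
//   );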
{
name: "cognito-sync",
description:
"Amazon Cognito Sync Amazon Cognito Sync provides an AWS service and client library that enable cross-device syncing of application-related user data. High-level client libraries are available for both iOS and Android. You can use these libraries to persist data locally so that it's available even if the device is offline. Developer credentials don't need to be stored on the mobile device to access the service. You can use Amazon Cognito to obtain a normalized user ID and credentials. User data is persisted in a dataset that can store up to 1 MB of key-value pairs, and you can have up to 20 datasets per user identity. With Amazon Cognito Sync, the data stored for each identity is accessible only to credentials assigned to that identity. In order to use the Cognito Sync service, you need to make API calls using credentials retrieved with Amazon Cognito Identity service. If you want to use Cognito Sync in an Android or iOS application, you will probably want to make API calls via the AWS Mobile SDK. To learn more, see the Developer Guide for Android and the Developer Guide for iOS",
loadSpec: "aws/cognito-sync",
},
{
name: "comprehend",
description:
"Amazon Comprehend is an Amazon Web Services service for gaining insight into the content of documents. Use these actions to determine the topics contained in your documents, the topics they discuss, the predominant sentiment expressed in them, the predominant language used, and more",
loadSpec: "aws/comprehend",
},
{
name: "comprehendmedical",
description:
"Amazon Comprehend Medical extracts structured information from unstructured clinical text. Use these actions to gain insight in your documents. Amazon Comprehend Medical only detects entities in English language texts. Amazon Comprehend Medical places limits on the sizes of files allowed for different API operations. To learn more, see Guidelines and quotas in the Amazon Comprehend Medical Developer Guide",
loadSpec: "aws/comprehendmedical",
},
{
name: "compute-optimizer",
description:
"Compute Optimizer is a service that analyzes the configuration and utilization metrics of your Amazon Web Services compute resources, such as Amazon EC2 instances, Amazon EC2 Auto Scaling groups, Lambda functions, Amazon EBS volumes, and Amazon ECS services on Fargate. It reports whether your resources are optimal, and generates optimization recommendations to reduce the cost and improve the performance of your workloads. Compute Optimizer also provides recent utilization metric data, in addition to projected utilization metric data for the recommendations, which you can use to evaluate which recommendation provides the best price-performance trade-off. The analysis of your usage patterns can help you decide when to move or resize your running resources, and still meet your performance and capacity requirements. For more information about Compute Optimizer, including the required permissions to use the service, see the Compute Optimizer User Guide",
loadSpec: "aws/compute-optimizer",
},
{
name: "connect",
description:
"Amazon Connect actions Amazon Connect data types Amazon Connect is a cloud-based contact center solution that you use to set up and manage a customer contact center and provide reliable customer engagement at any scale. Amazon Connect provides metrics and real-time reporting that enable you to optimize contact routing. You can also resolve customer issues more efficiently by getting customers in touch with the appropriate agents. There are limits to the number of Amazon Connect resources that you can create. There are also limits to the number of requests that you can make per second. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide. You can use an endpoint to connect programmatically to an Amazon Web Services service. For a list of Amazon Connect endpoints, see Amazon Connect Endpoints",
loadSpec: "aws/connect",
},
{
name: "connect-contact-lens",
description:
"Contact Lens actions Contact Lens data types Amazon Connect Contact Lens enables you to analyze conversations between customer and agents, by using speech transcription, natural language processing, and intelligent search capabilities. It performs sentiment analysis, detects issues, and enables you to automatically categorize contacts. Amazon Connect Contact Lens provides both real-time and post-call analytics of customer-agent conversations. For more information, see Analyze conversations using speech analytics in the Amazon Connect Administrator Guide",
loadSpec: "aws/connect-contact-lens",
},
{
name: "connectcampaigns",
description: "Provide APIs to create and manage Amazon Connect Campaigns",
loadSpec: "aws/connectcampaigns",
},
{
name: "connectcampaignsv2",
description: "Provide APIs to create and manage Amazon Connect Campaigns",
loadSpec: "aws/connectcampaignsv2",
},
{
name: "connectcases",
description:
"With Amazon Connect Cases, your agents can track and manage customer issues that require multiple interactions, follow-up tasks, and teams in your contact center. A case represents a customer issue. It records the issue, the steps and interactions taken to resolve the issue, and the outcome. For more information, see Amazon Connect Cases in the Amazon Connect Administrator Guide",
loadSpec: "aws/connectcases",
},
{
name: "connectparticipant",
description:
"Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat. Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts",
loadSpec: "aws/connectparticipant",
},
{
name: "controlcatalog",
description:
"Welcome to the Amazon Web Services Control Catalog API reference. This guide is for developers who need detailed information about how to programmatically identify and filter the common controls and related metadata that are available to Amazon Web Services customers. This API reference provides descriptions, syntax, and usage examples for each of the actions and data types that are supported by Amazon Web Services Control Catalog. Use the following links to get started with the Amazon Web Services Control Catalog API: Actions: An alphabetical list of all Control Catalog API operations. Data types: An alphabetical list of all Control Catalog data types. Common parameters: Parameters that all operations can use. Common errors: Client and server errors that all operations can return",
loadSpec: "aws/controlcatalog",
},
{
name: "controltower",
description:
'Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources: Controls DisableControl EnableControl GetEnabledControl ListControlOperations ListEnabledControls UpdateEnabledControl Landing zones CreateLandingZone DeleteLandingZone GetLandingZone GetLandingZoneOperation ListLandingZones ListLandingZoneOperations ResetLandingZone UpdateLandingZone Baselines DisableBaseline EnableBaseline GetBaseline GetBaselineOperation GetEnabledBaseline ListBaselines ListEnabledBaselines ResetEnabledBaseline UpdateEnabledBaseline Tagging ListTagsForResource TagResource UntagResource For more information about these types of resources, see the Amazon Web Services Control Tower User Guide. About control APIs These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms "control" and "guardrail" are synonyms. To call these APIs, you\'ll need to know: the controlIdentifier for the control--or guardrail--you are targeting. the ARN associated with the target organizational unit (OU), which we call the targetIdentifier. the ARN associated with a resource that you wish to tag or untag. To get the controlIdentifier for your Amazon Web Services Control Tower control: The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation. About identifiers for Amazon Web Services Control Tower The Amazon Web Services Control Tower controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide. A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Amazon Web Services Control Tower Controls Reference Guide. Remember that Mandatory controls cannot be added or removed. Some controls have two identifiers. ARN format for Amazon Web Services Control Tower: arn:aws:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID} Example: arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED ARN format for Amazon Web Services Control Catalog: arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID} You can find the {CONTROL_CATALOG_OPAQUE_ID} in the Amazon Web Services Control Tower Controls Reference Guide, or in the Amazon Web Services Control Tower console, on the Control details page. The Amazon Web Services Control Tower APIs for enabled controls, such as GetEnabledControl and ListEnabledControls, always return an ARN of the same type given when the control was enabled. To get the targetIdentifier: The targetIdentifier is the ARN for an OU. In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. OU ARN format: arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId} About landing zone APIs You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. 
For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs. For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the "Actions" section. About baseline APIs You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines. You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines. The individual API operations for baselines are detailed in this document, the API reference manual, in the "Actions" section. For usage examples, see Baseline API input and output examples with CLI. About Amazon Web Services Control Catalog identifiers The EnableControl and DisableControl API operations can be called by specifying either the Amazon Web Services Control Tower identifer or the Amazon Web Services Control Catalog identifier. The API response returns the same type of identifier that you specified when calling the API. If you use an Amazon Web Services Control Tower identifier to call the EnableControl API, and then call EnableControl again with an Amazon Web Services Control Catalog identifier, Amazon Web Services Control Tower returns an error message stating that the control is already enabled. Similar behavior applies to the DisableControl API operation. Mandatory controls and the landing-zone-level Region deny control have Amazon Web Services Control Tower identifiers only. Details and examples Control API input and output examples with CLI Baseline API input and output examples with CLI Enable controls with CloudFormation Launch a landing zone with CloudFormation Control metadata tables (large page) Control availability by Region tables (large page) List of identifiers for legacy controls Controls reference guide Controls library groupings Creating Amazon Web Services Control Tower resources with Amazon Web Services CloudFormation To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower Recording API Requests Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide',
loadSpec: "aws/controltower",
},
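// Illustrative sketch (not part of the generated spec): enabling a control on an
// OU with the CLI, using the ARN formats described above. The control ARN is the
// documented example identifier; the OU ARN is a hypothetical placeholder.
//   aws controltower enable-control \
//     --control-identifier arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED \
//     --target-identifier arn:aws:organizations::123456789012:ou/o-exampleorgid/ou-examplerootid-exampleouid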
{
name: "cost-optimization-hub",
description:
"You can use the Cost Optimization Hub API to programmatically identify, filter, aggregate, and quantify savings for your cost optimization recommendations across multiple Amazon Web Services Regions and Amazon Web Services accounts in your organization. The Cost Optimization Hub API provides the following endpoint: https://cost-optimization-hub.us-east-1.amazonaws.com",
loadSpec: "aws/cost-optimization-hub",
},
{
name: "cur",
description:
"You can use the Amazon Web Services Cost and Usage Report API to programmatically create, query, and delete Amazon Web Services Cost and Usage Report definitions. Amazon Web Services Cost and Usage Report track the monthly Amazon Web Services costs and usage associated with your Amazon Web Services account. The report contains line items for each unique combination of Amazon Web Services product, usage type, and operation that your Amazon Web Services account uses. You can configure the Amazon Web Services Cost and Usage Report to show only the data that you want, using the Amazon Web Services Cost and Usage Report API. Service Endpoint The Amazon Web Services Cost and Usage Report API provides the following endpoint: cur.us-east-1.amazonaws.com",
loadSpec: "aws/cur",
},
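// Minimal sketch: because the Cost and Usage Report API is served only from the
// us-east-1 endpoint noted above, CLI calls should pin that Region, e.g.:
//   aws cur describe-report-definitions --region us-east-1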
{
name: "customer-profiles",
description:
"Amazon Connect Customer Profiles Customer Profiles actions Customer Profiles data types Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center. For more information about the Amazon Connect Customer Profiles feature, see Use Customer Profiles in the Amazon Connect Administrator's Guide",
loadSpec: "aws/customer-profiles",
},
{
name: "databrew",
description:
"Glue DataBrew is a visual, cloud-scale data-preparation service. DataBrew simplifies data preparation tasks, targeting data issues that are hard to spot and time-consuming to fix. DataBrew empowers users of all technical levels to visualize the data and perform one-click data transformations, with no coding required",
loadSpec: "aws/databrew",
},
{
name: "dataexchange",
description:
"AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data set in the AWS Cloud. As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription. You can use the APIs to download or copy your entitled data sets to Amazon Simple Storage Service (Amazon S3) for use across a variety of AWS analytics and machine learning services. As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide. A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object, Redshift datashare, API Gateway API, AWS Lake Formation data permission, or Amazon S3 data access. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets",
loadSpec: "aws/dataexchange",
},
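// Hedged example of the data set/revision hierarchy described above; the
// data-set ID is a hypothetical placeholder:
//   aws dataexchange list-data-sets --origin ENTITLED
//   aws dataexchange list-data-set-revisions --data-set-id <data-set-id>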
{
name: "datapipeline",
description:
"AWS Data Pipeline configures and manages a data-driven workflow called a pipeline. AWS Data Pipeline handles the details of scheduling and ensuring that data dependencies are met so that your application can focus on processing the data. AWS Data Pipeline provides a JAR implementation of a task runner called AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides logic for common data management scenarios, such as performing database queries and running data analysis using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data Pipeline Task Runner as your task runner, or you can write your own task runner to provide custom data management. AWS Data Pipeline implements two main sets of functionality. Use the first set to create a pipeline and define data sources, schedules, dependencies, and the transforms to be performed on the data. Use the second set in your task runner application to receive the next task ready for processing. The logic for performing the task, such as querying the data, running data analysis, or converting the data from one format to another, is contained within the task runner. The task runner performs the task assigned to it by the web service, reporting progress to the web service as it does so. When the task is done, the task runner reports the final success or failure of the task to the web service",
loadSpec: "aws/datapipeline",
},
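// Sketch of the first set of functionality described above (define and activate
// a pipeline); pipeline.json and the returned pipeline ID are placeholders:
//   aws datapipeline create-pipeline --name my-pipeline --unique-id my-pipeline-token
//   aws datapipeline put-pipeline-definition --pipeline-id <pipeline-id> \
//     --pipeline-definition file://pipeline.json
//   aws datapipeline activate-pipeline --pipeline-id <pipeline-id>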
{
name: "datasync",
description:
"DataSync DataSync is an online data movement and discovery service that simplifies data migration and helps you quickly, easily, and securely transfer your file or object data to, from, and between Amazon Web Services storage services. This API interface reference includes documentation for using DataSync programmatically. For complete information, see the DataSync User Guide",
loadSpec: "aws/datasync",
},
{
name: "datazone",
description:
"Amazon DataZone is a data management service that enables you to catalog, discover, govern, share, and analyze your data. With Amazon DataZone, you can share and access your data across accounts and supported regions. Amazon DataZone simplifies your experience across Amazon Web Services services, including, but not limited to, Amazon Redshift, Amazon Athena, Amazon Web Services Glue, and Amazon Web Services Lake Formation",
loadSpec: "aws/datazone",
},
{
name: "dax",
description:
"DAX is a managed caching service engineered for Amazon DynamoDB. DAX dramatically speeds up database reads by caching frequently-accessed data from DynamoDB, so applications can access that data with sub-millisecond latency. You can create a DAX cluster easily, using the AWS Management Console. With a few simple modifications to your code, your application can begin taking advantage of the DAX cluster and realize significant improvements in read performance",
loadSpec: "aws/dax",
},
{
name: "deadline",
description:
"The Amazon Web Services Deadline Cloud API provides infrastructure and centralized management for your projects. Use the Deadline Cloud API to onboard users, assign projects, and attach permissions specific to their job function. With Deadline Cloud, content production teams can deploy resources for their workforce securely in the cloud, reducing the costs of added physical infrastructure. Keep your content production operations secure, while allowing your contributors to access the tools they need, such as scalable high-speed storage, licenses, and cost management services",
loadSpec: "aws/deadline",
},
{
name: "detective",
description:
'Detective uses machine learning and purpose-built visualizations to help you to analyze and investigate security issues across your Amazon Web Services workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty. The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account. To add a member account to the behavior graph, the administrator account sends an invitation to the account. When the account accepts the invitation, it becomes a member account in the behavior graph. Detective is also integrated with Organizations. The organization management account designates the Detective administrator account for the organization. That account becomes the administrator account for the organization behavior graph. The Detective administrator account is also the delegated administrator account for Detective in Organizations. The Detective administrator account can enable any organization account as a member account in the organization behavior graph. The organization accounts do not receive invitations. The Detective administrator account can also invite other accounts to the organization behavior graph. Every behavior graph is specific to a Region. You can only use the API to manage behavior graphs that belong to the Region that is associated with the currently selected endpoint. The administrator account for a behavior graph can use the Detective API to do the following: Enable and disable Detective. Enabling Detective creates a new behavior graph. View the list of member accounts in a behavior graph. Add member accounts to a behavior graph. Remove member accounts from a behavior graph. Apply tags to a behavior graph. The organization management account can use the Detective API to select the delegated administrator for Detective. The Detective administrator account for an organization can use the Detective API to do the following: Perform all of the functions of an administrator account. Determine whether to automatically enable new organization accounts as member accounts in the organization behavior graph. An invited member account can use the Detective API to do the following: View the list of behavior graphs that they are invited to. Accept an invitation to contribute to a behavior graph. Decline an invitation to contribute to a behavior graph. Remove their account from a behavior graph. All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail. We replaced the term "master account" with the term "administrator account". An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph',
loadSpec: "aws/detective",
},
{
name: "devicefarm",
description:
"Welcome to the AWS Device Farm API documentation, which contains APIs for: Testing on desktop browsers Device Farm makes it possible for you to test your web applications on desktop browsers using Selenium. The APIs for desktop browser testing contain TestGrid in their names. For more information, see Testing Web Applications on Selenium with Device Farm. Testing on real mobile devices Device Farm makes it possible for you to test apps on physical phones, tablets, and other devices in the cloud. For more information, see the Device Farm Developer Guide",
loadSpec: "aws/devicefarm",
},
{
name: "devops-guru",
description:
"Amazon DevOps Guru is a fully managed service that helps you identify anomalous behavior in business critical operational applications. You specify the Amazon Web Services resources that you want DevOps Guru to cover, then the Amazon CloudWatch metrics and Amazon Web Services CloudTrail events related to those resources are analyzed. When anomalous behavior is detected, DevOps Guru creates an insight that includes recommendations, related events, and related metrics that can help you improve your operational applications. For more information, see What is Amazon DevOps Guru. You can specify 1 or 2 Amazon Simple Notification Service topics so you are notified every time a new insight is created. You can also enable DevOps Guru to generate an OpsItem in Amazon Web Services Systems Manager for each insight to help you manage and track your work addressing insights. To learn about the DevOps Guru workflow, see How DevOps Guru works. To learn about DevOps Guru concepts, see Concepts in DevOps Guru",
loadSpec: "aws/devops-guru",
},
{
name: "directconnect",
description:
"Direct Connect links your internal network to an Direct Connect location over a standard Ethernet fiber-optic cable. One end of the cable is connected to your router, the other to an Direct Connect router. With this connection in place, you can create virtual interfaces directly to the Amazon Web Services Cloud (for example, to Amazon EC2 and Amazon S3) and to Amazon VPC, bypassing Internet service providers in your network path. A connection provides access to all Amazon Web Services Regions except the China (Beijing) and (China) Ningxia Regions. Amazon Web Services resources in the China Regions can only be accessed through locations associated with those Regions",
loadSpec: "aws/directconnect",
},
{
name: "discovery",
description:
"Amazon Web Services Application Discovery Service Amazon Web Services Application Discovery Service (Application Discovery Service) helps you plan application migration projects. It automatically identifies servers, virtual machines (VMs), and network dependencies in your on-premises data centers. For more information, see the Amazon Web Services Application Discovery Service FAQ. Application Discovery Service offers three ways of performing discovery and collecting data about your on-premises servers: Agentless discovery using Amazon Web Services Application Discovery Service Agentless Collector (Agentless Collector), which doesn't require you to install an agent on each host. Agentless Collector gathers server information regardless of the operating systems, which minimizes the time required for initial on-premises infrastructure assessment. Agentless Collector doesn't collect information about network dependencies, only agent-based discovery collects that information. Agent-based discovery using the Amazon Web Services Application Discovery Agent (Application Discovery Agent) collects a richer set of data than agentless discovery, which you install on one or more hosts in your data center. The agent captures infrastructure and application information, including an inventory of running processes, system performance information, resource utilization, and network dependencies. The information collected by agents is secured at rest and in transit to the Application Discovery Service database in the Amazon Web Services cloud. For more information, see Amazon Web Services Application Discovery Agent. Amazon Web Services Partner Network (APN) solutions integrate with Application Discovery Service, enabling you to import details of your on-premises environment directly into Amazon Web Services Migration Hub (Migration Hub) without using Agentless Collector or Application Discovery Agent. Third-party application discovery tools can query Amazon Web Services Application Discovery Service, and they can write to the Application Discovery Service database using the public API. In this way, you can import data into Migration Hub and view it, so that you can associate applications with servers and track migrations. Working With This Guide This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the Amazon Web Services SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. Remember that you must set your Migration Hub home Region before you call any of these APIs. You must make API calls for write actions (create, notify, associate, disassociate, import, or put) while in your home Region, or a HomeRegionNotSetException error is returned. API calls for read actions (list, describe, stop, and delete) are permitted outside of your home Region. Although it is unlikely, the Migration Hub home Region could change. If you call APIs outside the home Region, an InvalidInputException is returned. You must call GetHomeRegion to obtain the latest Migration Hub home Region. This guide is intended for use with the Amazon Web Services Application Discovery Service User Guide. All data is handled according to the Amazon Web Services Privacy Policy. 
You can operate Application Discovery Service offline to inspect collected data before it is shared with the service",
loadSpec: "aws/discovery",
},
{
name: "dlm",
description:
"Amazon Data Lifecycle Manager With Amazon Data Lifecycle Manager, you can manage the lifecycle of your Amazon Web Services resources. You create lifecycle policies, which are used to automate operations on the specified resources. Amazon Data Lifecycle Manager supports Amazon EBS volumes and snapshots. For information about using Amazon Data Lifecycle Manager with Amazon EBS, see Amazon Data Lifecycle Manager in the Amazon EC2 User Guide",
loadSpec: "aws/dlm",
},
{
name: "dms",
description:
"Database Migration Service Database Migration Service (DMS) can migrate your data to and from the most widely used commercial and open-source databases such as Oracle, PostgreSQL, Microsoft SQL Server, Amazon Redshift, MariaDB, Amazon Aurora, MySQL, and SAP Adaptive Server Enterprise (ASE). The service supports homogeneous migrations such as Oracle to Oracle, as well as heterogeneous migrations between different database platforms, such as Oracle to MySQL or SQL Server to PostgreSQL. For more information about DMS, see What Is Database Migration Service? in the Database Migration Service User Guide",
loadSpec: "aws/dms",
},
{
name: "docdb",
description:
"Amazon DocumentDB is a fast, reliable, and fully managed database service. Amazon DocumentDB makes it easy to set up, operate, and scale MongoDB-compatible databases in the cloud. With Amazon DocumentDB, you can run the same application code and use the same drivers and tools that you use with MongoDB",
loadSpec: "aws/docdb",
},
{
name: "docdb-elastic",
description:
"Amazon DocumentDB elastic clusters Amazon DocumentDB elastic-clusters support workloads with millions of reads/writes per second and petabytes of storage capacity. Amazon DocumentDB elastic clusters also simplify how developers interact with Amazon DocumentDB elastic-clusters by eliminating the need to choose, manage or upgrade instances. Amazon DocumentDB elastic-clusters were created to: provide a solution for customers looking for a database that provides virtually limitless scale with rich query capabilities and MongoDB API compatibility. give customers higher connection limits, and to reduce downtime from patching. continue investing in a cloud-native, elastic, and class leading architecture for JSON workloads",
loadSpec: "aws/docdb-elastic",
},
{
name: "drs",
description: "AWS Elastic Disaster Recovery Service",
loadSpec: "aws/drs",
},
{
name: "ds",
description:
"Directory Service Directory Service is a web service that makes it easy for you to setup and run directories in the Amazon Web Services cloud, or connect your Amazon Web Services resources with an existing self-managed Microsoft Active Directory. This guide provides detailed information about Directory Service operations, data types, parameters, and errors. For information about Directory Services features, see Directory Service and the Directory Service Administration Guide. Amazon Web Services provides SDKs that consist of libraries and sample code for various programming languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs provide a convenient way to create programmatic access to Directory Service and other Amazon Web Services services. For more information about the Amazon Web Services SDKs, including how to download and install them, see Tools for Amazon Web Services",
loadSpec: "aws/ds",
},
{
name: "ds-data",
description:
"Amazon Web Services Directory Service Data is an extension of Directory Service. This API reference provides detailed information about Directory Service Data operations and object types. With Directory Service Data, you can create, read, update, and delete users, groups, and memberships from your Managed Microsoft AD without additional costs and without deploying dedicated management instances. You can also perform built-in object management tasks across directories without direct network connectivity, which simplifies provisioning and access management to achieve fully automated deployments. Directory Service Data supports user and group write operations, such as CreateUser and CreateGroup, within the organizational unit (OU) of your Managed Microsoft AD. Directory Service Data supports read operations, such as ListUsers and ListGroups, on all users, groups, and group memberships within your Managed Microsoft AD and across trusted realms. Directory Service Data supports adding and removing group members in your OU and the Amazon Web Services Delegated Groups OU, so you can grant and deny access to specific roles and permissions. For more information, see Manage users and groups in the Directory Service Administration Guide. Directory management operations and configuration changes made against the Directory Service API will also reflect in Directory Service Data API with eventual consistency. You can expect a short delay between management changes, such as adding a new directory trust and calling the Directory Service Data API for the newly created trusted realm. Directory Service Data connects to your Managed Microsoft AD domain controllers and performs operations on underlying directory objects. When you create your Managed Microsoft AD, you choose subnets for domain controllers that Directory Service creates on your behalf. If a domain controller is unavailable, Directory Service Data uses an available domain controller. As a result, you might notice eventual consistency while objects replicate from one domain controller to another domain controller. For more information, see What gets created in the Directory Service Administration Guide. Directory limits vary by Managed Microsoft AD edition: Standard edition \u2013 Supports 8 transactions per second (TPS) for read operations and 4 TPS for write operations per directory. There's a concurrency limit of 10 concurrent requests. Enterprise edition \u2013 Supports 16 transactions per second (TPS) for read operations and 8 TPS for write operations per directory. There's a concurrency limit of 10 concurrent requests. Amazon Web Services Account - Supports a total of 100 TPS for Directory Service Data operations across all directories. Directory Service Data only supports the Managed Microsoft AD directory type and is only available in the primary Amazon Web Services Region. For more information, see Managed Microsoft AD and Primary vs additional Regions in the Directory Service Administration Guide",
loadSpec: "aws/ds-data",
},
{
name: "dsql",
description:
"This is an interface reference for Amazon Aurora DSQL. It contains documentation for one of the programming or command line interfaces you can use to manage Amazon Aurora DSQL. Amazon Aurora DSQL is a serverless, distributed SQL database suitable for workloads of any size. Aurora DSQL is available in both single-Region and multi-Region configurations, so your clusters and databases are always available even if an Availability Zone or an Amazon Web Services Region are unavailable. Aurora DSQL lets you focus on using your data to acquire new insights for your business and customers",
loadSpec: "aws/dsql",
},
{
name: "dynamodb",
description:
"Amazon DynamoDB Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with seamless scalability. DynamoDB lets you offload the administrative burdens of operating and scaling a distributed database, so that you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling. With DynamoDB, you can create database tables that can store and retrieve any amount of data, and serve any level of request traffic. You can scale up or scale down your tables' throughput capacity without downtime or performance degradation, and use the Amazon Web Services Management Console to monitor resource utilization and performance metrics. DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is stored on solid state disks (SSDs) and automatically replicated across multiple Availability Zones in an Amazon Web Services Region, providing built-in high availability and data durability",
loadSpec: "aws/dynamodb",
},
{
name: "dynamodbstreams",
description:
"Amazon DynamoDB Amazon DynamoDB Streams provides API actions for accessing streams and processing stream records. To learn more about application development with Streams, see Capturing Table Activity with DynamoDB Streams in the Amazon DynamoDB Developer Guide",
loadSpec: "aws/dynamodbstreams",
},
{
name: "ebs",
description:
"You can use the Amazon Elastic Block Store (Amazon EBS) direct APIs to create Amazon EBS snapshots, write data directly to your snapshots, read data on your snapshots, and identify the differences or changes between two snapshots. If you\u2019re an independent software vendor (ISV) who offers backup services for Amazon EBS, the EBS direct APIs make it more efficient and cost-effective to track incremental changes on your Amazon EBS volumes through snapshots. This can be done without having to create new volumes from snapshots, and then use Amazon Elastic Compute Cloud (Amazon EC2) instances to compare the differences. You can create incremental snapshots directly from data on-premises into volumes and the cloud to use for quick disaster recovery. With the ability to write and read snapshots, you can write your on-premises data to an snapshot during a disaster. Then after recovery, you can restore it back to Amazon Web Services or on-premises from the snapshot. You no longer need to build and maintain complex mechanisms to copy data to and from Amazon EBS. This API reference provides detailed information about the actions, data types, parameters, and errors of the EBS direct APIs. For more information about the elements that make up the EBS direct APIs, and examples of how to use them effectively, see Accessing the Contents of an Amazon EBS Snapshot in the Amazon Elastic Compute Cloud User Guide. For more information about the supported Amazon Web Services Regions, endpoints, and service quotas for the EBS direct APIs, see Amazon Elastic Block Store Endpoints and Quotas in the Amazon Web Services General Reference",
loadSpec: "aws/ebs",
},
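// Hedged example of the snapshot-diffing workflow described above; both
// snapshot IDs are hypothetical placeholders:
//   aws ebs list-snapshot-blocks --snapshot-id snap-0aaaaaaaaaaaaaaaa
//   aws ebs list-changed-blocks --first-snapshot-id snap-0aaaaaaaaaaaaaaaa \
//     --second-snapshot-id snap-0bbbbbbbbbbbbbbbb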
{
name: "ec2",
description:
"Amazon Elastic Compute Cloud You can access the features of Amazon Elastic Compute Cloud (Amazon EC2) programmatically. For more information, see the Amazon EC2 Developer Guide",
loadSpec: "aws/ec2",
},
{
name: "ec2-instance-connect",
description:
"This is the Amazon EC2 Instance Connect API Reference. It provides descriptions, syntax, and usage examples for each of the actions for Amazon EC2 Instance Connect. Amazon EC2 Instance Connect enables system administrators to publish one-time use SSH public keys to EC2, providing users a simple and secure way to connect to their instances. To view the Amazon EC2 Instance Connect content in the Amazon EC2 User Guide, see Connect to your Linux instance using EC2 Instance Connect. For Amazon EC2 APIs, see the Amazon EC2 API Reference",
loadSpec: "aws/ec2-instance-connect",
},
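// Minimal sketch of publishing a one-time SSH key as described above; the
// instance ID, zone, and key path are hypothetical placeholders:
//   aws ec2-instance-connect send-ssh-public-key --instance-id i-0123456789abcdef0 \
//     --instance-os-user ec2-user --availability-zone us-east-1a \
//     --ssh-public-key file://my_key.pub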
{
name: "ecr",
description:
"Amazon Elastic Container Registry Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR supports private repositories with resource-based permissions using IAM so that specific users or Amazon EC2 instances can access repositories and images. Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the Amazon Web Services General Reference",
loadSpec: "aws/ecr",
},
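// Sketch of the standard Docker CLI login flow mentioned above; the account ID
// and Region are placeholders:
//   aws ecr get-login-password --region us-east-1 | \
//     docker login --username AWS --password-stdin 123456789012.dkr.ecr.us-east-1.amazonaws.com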
{
name: "ecr-public",
description:
"Amazon Elastic Container Registry Public Amazon Elastic Container Registry Public (Amazon ECR Public) is a managed container image registry service. Amazon ECR provides both public and private registries to host your container images. You can use the Docker CLI or your preferred client to push, pull, and manage images. Amazon ECR provides a secure, scalable, and reliable registry for your Docker or Open Container Initiative (OCI) images. Amazon ECR supports public repositories with this API. For information about the Amazon ECR API for private repositories, see Amazon Elastic Container Registry API Reference",
loadSpec: "aws/ecr-public",
},
{
name: "ecs",
description:
"Amazon Elastic Container Service Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes it easy to run, stop, and manage Docker containers. You can host your cluster on a serverless infrastructure that's managed by Amazon ECS by launching your services or tasks on Fargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2) or External (on-premises) instances that you manage. Amazon ECS makes it easy to launch and stop container-based applications with simple API calls. This makes it easy to get the state of your cluster from a centralized service, and gives you access to many familiar Amazon EC2 features. You can use Amazon ECS to schedule the placement of containers across your cluster based on your resource needs, isolation policies, and availability requirements. With Amazon ECS, you don't need to operate your own cluster management and configuration management systems. You also don't need to worry about scaling your management infrastructure",
loadSpec: "aws/ecs",
},
{
name: "efs",
description:
"Amazon Elastic File System Amazon Elastic File System (Amazon EFS) provides simple, scalable file storage for use with Amazon EC2 Linux and Mac instances in the Amazon Web Services Cloud. With Amazon EFS, storage capacity is elastic, growing and shrinking automatically as you add and remove files, so that your applications have the storage they need, when they need it. For more information, see the Amazon Elastic File System API Reference and the Amazon Elastic File System User Guide",
loadSpec: "aws/efs",
},
{
name: "eks",
description:
"Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on Amazon Web Services without needing to setup or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications. Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required",
loadSpec: "aws/eks",
},
{
name: "eks-auth",
description:
"The Amazon EKS Auth API and the AssumeRoleForPodIdentity action are only used by the EKS Pod Identity Agent",
loadSpec: "aws/eks-auth",
},
{
name: "elastic-inference",
description:
"Amazon Elastic Inference is no longer available. Elastic Inference public APIs",
loadSpec: "aws/elastic-inference",
},
{
name: "elasticache",
description:
"Amazon ElastiCache Amazon ElastiCache is a web service that makes it easier to set up, operate, and scale a distributed cache in the cloud. With ElastiCache, customers get all of the benefits of a high-performance, in-memory cache with less of the administrative burden involved in launching and managing a distributed cache. The service makes setup, scaling, and cluster failure handling much simpler than in a self-managed cache deployment. In addition, through integration with Amazon CloudWatch, customers get enhanced visibility into the key performance statistics associated with their cache and can receive alarms if a part of their cache runs hot",
loadSpec: "aws/elasticache",
},
{
name: "elasticbeanstalk",
description:
"AWS Elastic Beanstalk AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage scalable, fault-tolerant applications running on the Amazon Web Services cloud. For more information about this product, go to the AWS Elastic Beanstalk details page. The location of the latest AWS Elastic Beanstalk WSDL is https://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl. To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that enable you to access the API, go to Tools for Amazon Web Services. Endpoints For a list of region-specific endpoints that AWS Elastic Beanstalk supports, go to Regions and Endpoints in the Amazon Web Services Glossary",
loadSpec: "aws/elasticbeanstalk",
},
{
name: "elastictranscoder",
description:
"AWS Elastic Transcoder Service The AWS Elastic Transcoder Service",
loadSpec: "aws/elastictranscoder",
},
{
name: "elb",
description:
"Elastic Load Balancing A load balancer can distribute incoming traffic across your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered instances and ensures that it routes traffic only to healthy instances. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer and a protocol and port number for connections from the load balancer to the instances. Elastic Load Balancing supports three types of load balancers: Application Load Balancers, Network Load Balancers, and Classic Load Balancers. You can select a load balancer based on your application needs. For more information, see the Elastic Load Balancing User Guide. This reference covers the 2012-06-01 API, which supports Classic Load Balancers. The 2015-12-01 API supports Application Load Balancers and Network Load Balancers. To get started, create a load balancer with one or more listeners using CreateLoadBalancer. Register your instances with the load balancer using RegisterInstancesWithLoadBalancer. All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds with a 200 OK response code",
loadSpec: "aws/elb",
},
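// Hedged example of the getting-started flow named above (CreateLoadBalancer,
// then RegisterInstancesWithLoadBalancer); names and IDs are placeholders:
//   aws elb create-load-balancer --load-balancer-name my-classic-lb \
//     --listeners Protocol=HTTP,LoadBalancerPort=80,InstanceProtocol=HTTP,InstancePort=80 \
//     --availability-zones us-east-1a
//   aws elb register-instances-with-load-balancer --load-balancer-name my-classic-lb \
//     --instances i-0123456789abcdef0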
{
name: "elbv2",
description:
"Elastic Load Balancing A load balancer distributes incoming traffic across targets, such as your EC2 instances. This enables you to increase the availability of your application. The load balancer also monitors the health of its registered targets and ensures that it routes traffic only to healthy targets. You configure your load balancer to accept incoming traffic by specifying one or more listeners, which are configured with a protocol and port number for connections from clients to the load balancer. You configure a target group with a protocol and port number for connections from the load balancer to the targets, and with health check settings to be used when checking the health status of the targets. Elastic Load Balancing supports the following types of load balancers: Application Load Balancers, Network Load Balancers, Gateway Load Balancers, and Classic Load Balancers. This reference covers the following load balancer types: Application Load Balancer - Operates at the application layer (layer 7) and supports HTTP and HTTPS. Network Load Balancer - Operates at the transport layer (layer 4) and supports TCP, TLS, and UDP. Gateway Load Balancer - Operates at the network layer (layer 3). For more information, see the Elastic Load Balancing User Guide. All Elastic Load Balancing operations are idempotent, which means that they complete at most one time. If you repeat an operation, it succeeds",
loadSpec: "aws/elbv2",
},
{
name: "emr",
description:
"Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management",
loadSpec: "aws/emr",
},
{
name: "emr-containers",
description:
'Amazon EMR on EKS provides a deployment option for Amazon EMR that allows you to run open-source big data frameworks on Amazon Elastic Kubernetes Service (Amazon EKS). With this deployment option, you can focus on running analytics workloads while Amazon EMR on EKS builds, configures, and manages containers for open-source applications. For more information about Amazon EMR on EKS concepts and tasks, see What is Amazon EMR on EKS. Amazon EMR containers is the API name for Amazon EMR on EKS. The emr-containers prefix is used in the following scenarios: It is the prefix in the CLI commands for Amazon EMR on EKS. For example, aws emr-containers start-job-run. It is the prefix before IAM policy actions for Amazon EMR on EKS. For example, "Action": [ "emr-containers:StartJobRun"]. For more information, see Policy actions for Amazon EMR on EKS. It is the prefix used in Amazon EMR on EKS service endpoints. For example, emr-containers.us-east-2.amazonaws.com. For more information, see Amazon EMR on EKS Service Endpoints',
loadSpec: "aws/emr-containers",
},
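// Sketch of the emr-containers CLI prefix in use; the virtual cluster ID, role
// ARN, release label, and job-driver file are all hypothetical placeholders:
//   aws emr-containers start-job-run --virtual-cluster-id <virtual-cluster-id> \
//     --name sample-spark-job \
//     --execution-role-arn arn:aws:iam::123456789012:role/EMRContainersJobRole \
//     --release-label emr-6.15.0-latest --job-driver file://job-driver.json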
{
name: "emr-serverless",
description:
'Amazon EMR Serverless is a new deployment option for Amazon EMR. Amazon EMR Serverless provides a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive. With Amazon EMR Serverless, you don\u2019t have to configure, optimize, secure, or operate clusters to run applications with these frameworks. The API reference to Amazon EMR Serverless is emr-serverless. The emr-serverless prefix is used in the following scenarios: It is the prefix in the CLI commands for Amazon EMR Serverless. For example, aws emr-serverless start-job-run. It is the prefix before IAM policy actions for Amazon EMR Serverless. For example, "Action": ["emr-serverless:StartJobRun"]. For more information, see Policy actions for Amazon EMR Serverless. It is the prefix used in Amazon EMR Serverless service endpoints. For example, emr-serverless.us-east-2.amazonaws.com',
loadSpec: "aws/emr-serverless",
},
{
name: "entityresolution",
description:
"Welcome to the Entity Resolution API Reference. Entity Resolution is an Amazon Web Services service that provides pre-configured entity resolution capabilities that enable developers and analysts at advertising and marketing companies to build an accurate and complete view of their consumers. With Entity Resolution, you can match source records containing consumer identifiers, such as name, email address, and phone number. This is true even when these records have incomplete or conflicting identifiers. For example, Entity Resolution can effectively match a source record from a customer relationship management (CRM) system with a source record from a marketing system containing campaign information. To learn more about Entity Resolution concepts, procedures, and best practices, see the Entity Resolution User Guide",
loadSpec: "aws/entityresolution",
},
{
name: "es",
description:
"Amazon Elasticsearch Configuration Service Use the Amazon Elasticsearch Configuration API to create, configure, and manage Elasticsearch domains. For sample code that uses the Configuration API, see the Amazon Elasticsearch Service Developer Guide. The guide also contains sample code for sending signed HTTP requests to the Elasticsearch APIs. The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints",
loadSpec: "aws/es",
},
{
name: "events",
description:
"Amazon EventBridge helps you to respond to state changes in your Amazon Web Services resources. When your resources change state, they automatically send events to an event stream. You can create rules that match selected events in the stream and route them to targets to take action. You can also use rules to take action on a predetermined schedule. For example, you can configure rules to: Automatically invoke an Lambda function to update DNS entries when an event notifies you that Amazon EC2 instance enters the running state. Direct specific API records from CloudTrail to an Amazon Kinesis data stream for detailed analysis of potential security or availability risks. Periodically invoke a built-in target to create a snapshot of an Amazon EBS volume. For more information about the features of Amazon EventBridge, see the Amazon EventBridge User Guide",
loadSpec: "aws/events",
},
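// Minimal sketch of the rule-and-target model described above; the rule name
// and Lambda function ARN are hypothetical placeholders:
//   aws events put-rule --name nightly-snapshot --schedule-expression "rate(1 day)"
//   aws events put-targets --rule nightly-snapshot \
//     --targets Id=1,Arn=arn:aws:lambda:us-east-1:123456789012:function:my-function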
{
name: "evidently",
description:
"You can use Amazon CloudWatch Evidently to safely validate new features by serving them to a specified percentage of your users while you roll out the feature. You can monitor the performance of the new feature to help you decide when to ramp up traffic to your users. This helps you reduce risk and identify unintended consequences before you fully launch the feature. You can also conduct A/B experiments to make feature design decisions based on evidence and data. An experiment can test as many as five variations at once. Evidently collects experiment data and analyzes it using statistical methods. It also provides clear recommendations about which variations perform better. You can test both user-facing features and backend features",
loadSpec: "aws/evidently",
},
{
name: "finspace",
description:
"The FinSpace management service provides the APIs for managing FinSpace environments",
loadSpec: "aws/finspace",
},
{
name: "finspace-data",
description: "The FinSpace APIs let you take actions inside the FinSpace",
loadSpec: "aws/finspace-data",
},
{
name: "firehose",
description:
"Amazon Data Firehose Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose. Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations",
loadSpec: "aws/firehose",
},
{
name: "fis",
description:
"Amazon Web Services Fault Injection Service is a managed service that enables you to perform fault injection experiments on your Amazon Web Services workloads. For more information, see the Fault Injection Service User Guide",
loadSpec: "aws/fis",
},
{
name: "fms",
description:
"This is the Firewall Manager API Reference. This guide is for developers who need detailed information about the Firewall Manager API actions, data types, and errors. For detailed information about Firewall Manager features, see the Firewall Manager Developer Guide. Some API actions require explicit resource permissions. For information, see the developer guide topic Service roles for Firewall Manager",
loadSpec: "aws/fms",
},
{
name: "forecast",
description:
"Provides APIs for creating and managing Amazon Forecast resources",
loadSpec: "aws/forecast",
},
{
name: "forecastquery",
description:
"Provides APIs for creating and managing Amazon Forecast resources",
loadSpec: "aws/forecastquery",
},
{
name: "frauddetector",
description:
"This is the Amazon Fraud Detector API Reference. This guide is for developers who need detailed information about Amazon Fraud Detector API actions, data types, and errors. For more information about Amazon Fraud Detector features, see the Amazon Fraud Detector User Guide. We provide the Query API as well as AWS software development kits (SDK) for Amazon Fraud Detector in Java and Python programming languages. The Amazon Fraud Detector Query API provides HTTPS requests that use the HTTP verb GET or POST and a Query parameter Action. AWS SDK provides libraries, sample code, tutorials, and other resources for software developers who prefer to build applications using language-specific APIs instead of submitting a request over HTTP or HTTPS. These libraries provide basic functions that automatically take care of tasks such as cryptographically signing your requests, retrying requests, and handling error responses, so that it is easier for you to get started. For more information about the AWS SDKs, go to Tools to build on AWS page, scroll down to the SDK section, and choose plus (+) sign to expand the section",
loadSpec: "aws/frauddetector",
},
{
name: "freetier",
description:
"You can use the Amazon Web Services Free Tier API to query programmatically your Free Tier usage data. Free Tier tracks your monthly usage data for all free tier offers that are associated with your Amazon Web Services account. You can use the Free Tier API to filter and show only the data that you want. Service endpoint The Free Tier API provides the following endpoint: https://freetier.us-east-1.api.aws For more information, see Using the Amazon Web Services Free Tier in the Billing User Guide",
loadSpec: "aws/freetier",
},
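// Hedged example: querying current Free Tier usage through the single global
// endpoint noted above:
//   aws freetier get-free-tier-usage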
{
name: "fsx",
description:
"Amazon FSx is a fully managed service that makes it easy for storage and application administrators to launch and use shared file storage",
loadSpec: "aws/fsx",
},
{
name: "gamelift",
description:
"Amazon GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on Amazon Web Services global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand. About Amazon GameLift solutions Get more information on these Amazon GameLift solutions in the Amazon GameLift Developer Guide. Amazon GameLift managed hosting -- Amazon GameLift offers a fully managed service to set up and maintain computing machines for hosting, manage game session and player session life cycle, and handle security, storage, and performance tracking. You can use automatic scaling tools to balance player demand and hosting costs, configure your game session management to minimize player latency, and add FlexMatch for matchmaking. Managed hosting with Realtime Servers -- With Amazon GameLift Realtime Servers, you can quickly configure and set up ready-to-go game servers for your game. Realtime Servers provides a game server framework with core Amazon GameLift infrastructure already built in. Then use the full range of Amazon GameLift managed hosting features, including FlexMatch, for your game. Amazon GameLift FleetIQ -- Use Amazon GameLift FleetIQ as a standalone service while hosting your games using EC2 instances and Auto Scaling groups. Amazon GameLift FleetIQ provides optimizations for game hosting, including boosting the viability of low-cost Spot Instances gaming. For a complete solution, pair the Amazon GameLift FleetIQ and FlexMatch standalone services. Amazon GameLift FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a customizable matchmaking service for multiplayer games. Use FlexMatch as integrated with Amazon GameLift managed hosting or incorporate FlexMatch as a standalone service into your own hosting solution. About this API Reference This reference guide describes the low-level service API for Amazon GameLift. With each topic in this guide, you can find links to language-specific SDK guides and the Amazon Web Services CLI reference. Useful links: Amazon GameLift API operations listed by tasks Amazon GameLift tools and resources",
loadSpec: "aws/gamelift",
},
{
name: "geo-maps",
description:
"Integrate high-quality base map data into your applications using MapLibre. Capabilities include: Access to comprehensive base map data, allowing you to tailor the map display to your specific needs. Multiple pre-designed map styles suited for various application types, such as navigation, logistics, or data visualization. Generation of static map images for scenarios where interactive maps aren't suitable, such as: Embedding in emails or documents Displaying in low-bandwidth environments Creating printable maps Enhancing application performance by reducing client-side rendering",
loadSpec: "aws/geo-maps",
},
{
name: "geo-places",
description:
"The Places API enables powerful location search and geocoding capabilities for your applications, offering global coverage with rich, detailed information. Key features include: Forward and reverse geocoding for addresses and coordinates Comprehensive place searches with detailed information, including: Business names and addresses Contact information Hours of operation POI (Points of Interest) categories Food types for restaurants Chain affiliation for relevant businesses Global data coverage with a wide range of POI categories Regular data updates to ensure accuracy and relevance",
loadSpec: "aws/geo-places",
},
{
name: "geo-routes",
description:
"With the Amazon Location Routes API you can calculate routes and estimate travel time based on up-to-date road network and live traffic information. Calculate optimal travel routes and estimate travel times using up-to-date road network and traffic data. Key features include: Point-to-point routing with estimated travel time, distance, and turn-by-turn directions Multi-point route optimization to minimize travel time or distance Route matrices for efficient multi-destination planning Isoline calculations to determine reachable areas within specified time or distance thresholds Map-matching to align GPS traces with the road network",
loadSpec: "aws/geo-routes",
},
{
name: "glacier",
description:
'Amazon S3 Glacier (Glacier) is a storage solution for "cold data." Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Glacier, customers can store their data cost effectively for months, years, or decades. Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don\'t have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations. Glacier is a great storage choice when low storage cost is paramount and your data is rarely retrieved. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3). You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Glacier. If you are a first-time user of Glacier, we recommend that you begin by reading the following sections in the Amazon S3 Glacier Developer Guide: What is Amazon S3 Glacier - This section of the Developer Guide describes the underlying data model, the operations it supports, and the AWS SDKs that you can use to interact with the service. Getting Started with Amazon S3 Glacier - The Getting Started section walks you through the process of creating a vault, uploading archives, creating jobs to download archives, retrieving the job output, and deleting archives',
loadSpec: "aws/glacier",
},
{
name: "globalaccelerator",
description:
"Global Accelerator This is the Global Accelerator API Reference. This guide is for developers who need detailed information about Global Accelerator API actions, data types, and errors. For more information about Global Accelerator features, see the Global Accelerator Developer Guide. Global Accelerator is a service in which you create accelerators to improve the performance of your applications for local and global users. Depending on the type of accelerator you choose, you can gain additional benefits. By using a standard accelerator, you can improve availability of your internet applications that are used by a global audience. With a standard accelerator, Global Accelerator directs traffic to optimal endpoints over the Amazon Web Services global network. For other scenarios, you might choose a custom routing accelerator. With a custom routing accelerator, you can use application logic to directly map one or more users to a specific endpoint among many endpoints. Global Accelerator is a global service that supports endpoints in multiple Amazon Web Services Regions but you must specify the US West (Oregon) Region to create, update, or otherwise work with accelerators. That is, for example, specify --region us-west-2 on Amazon Web Services CLI commands. By default, Global Accelerator provides you with static IP addresses that you associate with your accelerator. The static IP addresses are anycast from the Amazon Web Services edge network. For IPv4, Global Accelerator provides two static IPv4 addresses. For dual-stack, Global Accelerator provides a total of four addresses: two static IPv4 addresses and two static IPv6 addresses. With a standard accelerator for IPv4, instead of using the addresses that Global Accelerator provides, you can configure these entry points to be IPv4 addresses from your own IP address ranges that you bring to Global Accelerator (BYOIP). For a standard accelerator, they distribute incoming application traffic across multiple endpoint resources in multiple Amazon Web Services Regions , which increases the availability of your applications. Endpoints for standard accelerators can be Network Load Balancers, Application Load Balancers, Amazon EC2 instances, or Elastic IP addresses that are located in one Amazon Web Services Region or multiple Amazon Web Services Regions. For custom routing accelerators, you map traffic that arrives to the static IP addresses to specific Amazon EC2 servers in endpoints that are virtual private cloud (VPC) subnets. The static IP addresses remain assigned to your accelerator for as long as it exists, even if you disable the accelerator and it no longer accepts or routes traffic. However, when you delete an accelerator, you lose the static IP addresses that are assigned to it, so you can no longer route traffic by using them. You can use IAM policies like tag-based permissions with Global Accelerator to limit the users who have permissions to delete an accelerator. For more information, see Tag-based policies. For standard accelerators, Global Accelerator uses the Amazon Web Services global network to route traffic to the optimal regional endpoint based on health, client location, and policies that you configure. The service reacts instantly to changes in health or configuration to ensure that internet traffic from clients is always directed to healthy endpoints. For more information about understanding and using Global Accelerator, see the Global Accelerator Developer Guide",
loadSpec: "aws/globalaccelerator",
},
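// Sketch of the us-west-2 requirement called out above; the accelerator name is
// a placeholder:
//   aws globalaccelerator create-accelerator --name my-accelerator --region us-west-2
//   aws globalaccelerator list-accelerators --region us-west-2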
{
name: "glue",
description: "Glue Defines the public endpoint for the Glue service",
loadSpec: "aws/glue",
},
{
name: "grafana",
description:
"Amazon Managed Grafana is a fully managed and secure data visualization service that you can use to instantly query, correlate, and visualize operational metrics, logs, and traces from multiple sources. Amazon Managed Grafana makes it easy to deploy, operate, and scale Grafana, a widely deployed data visualization tool that is popular for its extensible data support. With Amazon Managed Grafana, you create logically isolated Grafana servers called workspaces. In a workspace, you can create Grafana dashboards and visualizations to analyze your metrics, logs, and traces without having to build, package, or deploy any hardware to run Grafana servers",
loadSpec: "aws/grafana",
},
{
name: "greengrass",
description:
"AWS IoT Greengrass seamlessly extends AWS onto physical devices so they can act locally on the data they generate, while still using the cloud for management, analytics, and durable storage. AWS IoT Greengrass ensures your devices can respond quickly to local events and operate with intermittent connectivity. AWS IoT Greengrass minimizes the cost of transmitting data to the cloud by allowing you to author AWS Lambda functions that execute locally",
loadSpec: "aws/greengrass",
},
{
name: "greengrassv2",
description:
"IoT Greengrass brings local compute, messaging, data management, sync, and ML inference capabilities to edge devices. This enables devices to collect and analyze data closer to the source of information, react autonomously to local events, and communicate securely with each other on local networks. Local devices can also communicate securely with Amazon Web Services IoT Core and export IoT data to the Amazon Web Services Cloud. IoT Greengrass developers can use Lambda functions and components to create and deploy applications to fleets of edge devices for local operation. IoT Greengrass Version 2 provides a new major version of the IoT Greengrass Core software, new APIs, and a new console. Use this API reference to learn how to use the IoT Greengrass V2 API operations to manage components, manage deployments, and core devices. For more information, see What is IoT Greengrass? in the IoT Greengrass V2 Developer Guide",
loadSpec: "aws/greengrassv2",
},
{
name: "groundstation",
description:
"Welcome to the AWS Ground Station API Reference. AWS Ground Station is a fully managed service that enables you to control satellite communications, downlink and process satellite data, and scale your satellite operations efficiently and cost-effectively without having to build or manage your own ground station infrastructure",
loadSpec: "aws/groundstation",
},
{
name: "guardduty",
description: