{ "version": "2.0", "service": "Defines service operations used by the GlueFrontendService", "operations": { "BatchCreatePartition": "

Creates one or more partitions in a batch operation.

", "BatchDeleteConnection": "

Deletes a list of connection definitions from the Data Catalog.

", "BatchDeletePartition": "

Deletes one or more partitions in a batch operation.

", "BatchDeleteTable": "

Deletes multiple tables at once.

", "BatchGetPartition": "

Retrieves partitions in a batch request.

", "CreateClassifier": "

Creates a Classifier in the user's account.

", "CreateConnection": "

Creates a connection definition in the Data Catalog.

", "CreateCrawler": "

Creates a new Crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in either the s3Targets or the jdbcTargets field.

", "CreateDatabase": "

Creates a new database in a Data Catalog.

", "CreateDevEndpoint": "

Creates a new DevEndpoint.

", "CreateJob": "

Creates a new job.

", "CreatePartition": "

Creates a new partition.

", "CreateScript": "

Transforms a directed acyclic graph (DAG) into a Python script.

", "CreateTable": "

Creates a new table definition in the Data Catalog.

", "CreateTrigger": "

Creates a new trigger.

", "CreateUserDefinedFunction": "

Creates a new function definition in the Data Catalog.

", "DeleteClassifier": "

Removes a Classifier from the metadata store.

", "DeleteConnection": "

Deletes a connection from the Data Catalog.

", "DeleteCrawler": "

Removes a specified Crawler from the metadata store, unless the Crawler state is RUNNING.

", "DeleteDatabase": "

Removes a specified Database from a Data Catalog.

", "DeleteDevEndpoint": "

Deletes a specified DevEndpoint.

", "DeleteJob": "

Deletes a specified job.

", "DeletePartition": "

Deletes a specified partition.

", "DeleteTable": "

Removes a table definition from the Data Catalog.

", "DeleteTrigger": "

Deletes a specified trigger.

", "DeleteUserDefinedFunction": "

Deletes an existing function definition from the Data Catalog.

", "GetCatalogImportStatus": "

Retrieves the status of a migration operation.

", "GetClassifier": "

Retrieves a Classifier by name.

", "GetClassifiers": "

Lists all Classifier objects in the metadata store.

", "GetConnection": "

Retrieves a connection definition from the Data Catalog.

", "GetConnections": "

Retrieves a list of connection definitions from the Data Catalog.

", "GetCrawler": "

Retrieves metadata for a specified Crawler.

", "GetCrawlerMetrics": "

Retrieves metrics about specified crawlers.

", "GetCrawlers": "

Retrieves metadata for all Crawlers defined in the customer account.

", "GetDatabase": "

Retrieves the definition of a specified database.

", "GetDatabases": "

Retrieves all Databases defined in a given Data Catalog.

", "GetDataflowGraph": "

Transforms a Python script into a directed acyclic graph (DAG).

", "GetDevEndpoint": "

Retrieves information about a specified DevEndpoint.

", "GetDevEndpoints": "

Retrieves all the DevEndpoints in this AWS account.

", "GetJob": "

Retrieves an existing job definition.

", "GetJobRun": "

Retrieves the metadata for a given job run.

", "GetJobRuns": "

Retrieves metadata for all runs of a given job.

", "GetJobs": "

Retrieves all current jobs.

", "GetMapping": "

Creates mappings.

", "GetPartition": "

Retrieves information about a specified partition.

", "GetPartitions": "

Retrieves information about the partitions in a table.

", "GetPlan": "

Gets a Python script to perform a specified mapping.

", "GetTable": "

Retrieves the Table definition in a Data Catalog for a specified table.

", "GetTableVersions": "

Retrieves a list of strings that identify available versions of a specified table.

", "GetTables": "

Retrieves the definitions of some or all of the tables in a given Database.

", "GetTrigger": "

Retrieves the definition of a trigger.

", "GetTriggers": "

Gets all the triggers associated with a job.

", "GetUserDefinedFunction": "

Retrieves a specified function definition from the Data Catalog.

", "GetUserDefinedFunctions": "

Retrieves multiple function definitions from the Data Catalog.

", "ImportCatalogToGlue": "

Imports an existing Athena Data Catalog to AWS Glue.

", "ResetJobBookmark": "

Resets a bookmark entry.

", "StartCrawler": "

Starts a crawl using the specified Crawler, regardless of what is scheduled. If the Crawler is already running, does nothing.

", "StartCrawlerSchedule": "

Changes the schedule state of the specified crawler to SCHEDULED, unless the crawler is already running or the schedule state is already SCHEDULED.

", "StartJobRun": "

Runs a job.

", "StartTrigger": "

Starts an existing trigger.

", "StopCrawler": "

If the specified Crawler is running, stops the crawl.

", "StopCrawlerSchedule": "

Sets the schedule state of the specified crawler to NOT_SCHEDULED, but does not stop the crawler if it is already running.

", "StopTrigger": "

Stops a specified trigger.

", "UpdateClassifier": "

Modifies an existing Classifier.

", "UpdateConnection": "

Updates a connection definition in the Data Catalog.

", "UpdateCrawler": "

Updates a Crawler. If a Crawler is running, you must stop it using StopCrawler before updating it.

", "UpdateCrawlerSchedule": "

Updates the schedule of a crawler using a Cron expression.

", "UpdateDatabase": "

Updates an existing database definition in a Data Catalog.

", "UpdateDevEndpoint": "

Updates a specified DevEndpoint.

", "UpdateJob": "

Updates an existing job definition.

", "UpdatePartition": "

Updates a partition.

", "UpdateTable": "

Updates a metadata table in the Data Catalog.

", "UpdateTrigger": "

Updates a trigger definition.

", "UpdateUserDefinedFunction": "

Updates an existing function definition in the Data Catalog.

" }, "shapes": { "AccessDeniedException": { "base": "

Access to a resource was denied.

", "refs": { } }, "Action": { "base": null, "refs": { "ActionList$member": null } }, "ActionList": { "base": null, "refs": { "CreateTriggerRequest$Actions": "

The actions initiated by this trigger when it fires.

", "Trigger$Actions": "

The actions initiated by this trigger.

", "TriggerUpdate$Actions": "

The actions initiated by this trigger.

" } }, "AlreadyExistsException": { "base": "

A resource to be created or added already exists.

", "refs": { } }, "AttemptCount": { "base": null, "refs": { "JobRun$Attempt": "

The number of the attempt to run this job.

" } }, "BatchCreatePartitionRequest": { "base": null, "refs": { } }, "BatchCreatePartitionResponse": { "base": null, "refs": { } }, "BatchDeleteConnectionRequest": { "base": null, "refs": { } }, "BatchDeleteConnectionResponse": { "base": null, "refs": { } }, "BatchDeletePartitionRequest": { "base": null, "refs": { } }, "BatchDeletePartitionResponse": { "base": null, "refs": { } }, "BatchDeletePartitionValueList": { "base": null, "refs": { "BatchDeletePartitionRequest$PartitionsToDelete": "

A list of PartitionInput structures that define the partitions to be deleted.

" } }, "BatchDeleteTableNameList": { "base": null, "refs": { "BatchDeleteTableRequest$TablesToDelete": "

A list of the tables to delete.

" } }, "BatchDeleteTableRequest": { "base": null, "refs": { } }, "BatchDeleteTableResponse": { "base": null, "refs": { } }, "BatchGetPartitionRequest": { "base": null, "refs": { } }, "BatchGetPartitionResponse": { "base": null, "refs": { } }, "BatchGetPartitionValueList": { "base": null, "refs": { "BatchGetPartitionRequest$PartitionsToGet": "

A list of partition values identifying the partitions to retrieve.

", "BatchGetPartitionResponse$UnprocessedKeys": "

A list of the partition values in the request for which partitions were not returned.

" } }, "Boolean": { "base": null, "refs": { "CatalogImportStatus$ImportCompleted": "

True if the migration has completed, or False otherwise.

", "CodeGenNodeArg$Param": "

True if the value is used as a parameter.

", "CrawlerMetrics$StillEstimating": "

True if the crawler is still estimating how long it will take to complete this run.

", "StorageDescriptor$Compressed": "

True if the data in the table is compressed, or False if not.

", "StorageDescriptor$StoredAsSubDirectories": "

True if the table data is stored in subdirectories, or False if not.

" } }, "BooleanValue": { "base": null, "refs": { "GetJobRunRequest$PredecessorsIncluded": "

A list of the predecessor runs to return as well.

" } }, "BoundedPartitionValueList": { "base": null, "refs": { "UpdatePartitionRequest$PartitionValueList": "

A list of the values defining the partition.

" } }, "CatalogEntries": { "base": null, "refs": { "GetMappingRequest$Sinks": "

A list of target tables.

", "GetPlanRequest$Sinks": "

The target tables.

" } }, "CatalogEntry": { "base": "

Specifies a table definition in the Data Catalog.

", "refs": { "CatalogEntries$member": null, "GetMappingRequest$Source": "

Specifies the source table.

", "GetPlanRequest$Source": "

The source table.

" } }, "CatalogIdString": { "base": null, "refs": { "BatchCreatePartitionRequest$CatalogId": "

The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.

", "BatchDeleteConnectionRequest$CatalogId": "

The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.

", "BatchDeletePartitionRequest$CatalogId": "

The ID of the Data Catalog where the partition to be deleted resides. If none is supplied, the AWS account ID is used by default.

", "BatchDeleteTableRequest$CatalogId": "

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

", "BatchGetPartitionRequest$CatalogId": "

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

", "CreateConnectionRequest$CatalogId": "

The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.

", "CreateDatabaseRequest$CatalogId": "

The ID of the Data Catalog in which to create the database. If none is supplied, the AWS account ID is used by default.

", "CreatePartitionRequest$CatalogId": "

The ID of the catalog in which the partition is to be created. Currently, this should be the AWS account ID.

", "CreateTableRequest$CatalogId": "

The ID of the Data Catalog in which to create the Table. If none is supplied, the AWS account ID is used by default.

", "CreateUserDefinedFunctionRequest$CatalogId": "

The ID of the Data Catalog in which to create the function. If none is supplied, the AWS account ID is used by default.

", "DeleteConnectionRequest$CatalogId": "

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

", "DeleteDatabaseRequest$CatalogId": "

The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.

", "DeletePartitionRequest$CatalogId": "

The ID of the Data Catalog where the partition to be deleted resides. If none is supplied, the AWS account ID is used by default.

", "DeleteTableRequest$CatalogId": "

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

", "DeleteUserDefinedFunctionRequest$CatalogId": "

The ID of the Data Catalog where the function to be deleted is located. If none is supplied, the AWS account ID is used by default.

", "GetCatalogImportStatusRequest$CatalogId": "

The ID of the catalog to migrate. Currently, this should be the AWS account ID.

", "GetConnectionRequest$CatalogId": "

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

", "GetConnectionsRequest$CatalogId": "

The ID of the Data Catalog in which the connections reside. If none is supplied, the AWS account ID is used by default.

", "GetDatabaseRequest$CatalogId": "

The ID of the Data Catalog in which the database resides. If none is supplied, the AWS account ID is used by default.

", "GetDatabasesRequest$CatalogId": "

The ID of the Data Catalog from which to retrieve Databases. If none is supplied, the AWS account ID is used by default.

", "GetPartitionRequest$CatalogId": "

The ID of the Data Catalog where the partition in question resides. If none is supplied, the AWS account ID is used by default.

", "GetPartitionsRequest$CatalogId": "

The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.

", "GetTableRequest$CatalogId": "

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

", "GetTableVersionsRequest$CatalogId": "

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

", "GetTablesRequest$CatalogId": "

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

", "GetUserDefinedFunctionRequest$CatalogId": "

The ID of the Data Catalog where the function to be retrieved is located. If none is supplied, the AWS account ID is used by default.

", "GetUserDefinedFunctionsRequest$CatalogId": "

The ID of the Data Catalog where the functions to be retrieved are located. If none is supplied, the AWS account ID is used by default.

", "ImportCatalogToGlueRequest$CatalogId": "

The ID of the catalog to import. Currently, this should be the AWS account ID.

", "UpdateConnectionRequest$CatalogId": "

The ID of the Data Catalog in which the connection resides. If none is supplied, the AWS account ID is used by default.

", "UpdateDatabaseRequest$CatalogId": "

The ID of the Data Catalog in which the metadata database resides. If none is supplied, the AWS account ID is used by default.

", "UpdatePartitionRequest$CatalogId": "

The ID of the Data Catalog where the partition to be updated resides. If none is supplied, the AWS account ID is used by default.

", "UpdateTableRequest$CatalogId": "

The ID of the Data Catalog where the table resides. If none is supplied, the AWS account ID is used by default.

", "UpdateUserDefinedFunctionRequest$CatalogId": "

The ID of the Data Catalog where the function to be updated is located. If none is supplied, the AWS account ID is used by default.

" } }, "CatalogImportStatus": { "base": "

A structure containing migration status information.

", "refs": { "GetCatalogImportStatusResponse$ImportStatus": "

The status of the specified catalog migration.

" } }, "Classification": { "base": null, "refs": { "CreateGrokClassifierRequest$Classification": "

The type of result that the classifier matches, such as Twitter JSON, Omniture logs, CloudWatch logs, and so forth.

", "GrokClassifier$Classification": "

The data form that the classifier matches, such as Twitter, JSON, Omniture Logs, and so forth.

", "UpdateGrokClassifierRequest$Classification": "

The type of result that the classifier matches, such as Twitter JSON, Omniture logs, CloudWatch logs, and so forth.

" } }, "Classifier": { "base": "

Classifiers are written in Python and triggered during a Crawl Task. You can write your own Classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A Classifier first checks whether a given file is in a format it can handle, and then, if so, creates a schema in the form of a StructType object that matches that data format.

", "refs": { "ClassifierList$member": null, "GetClassifierResponse$Classifier": "

The requested Classifier.

" } }, "ClassifierList": { "base": null, "refs": { "GetClassifiersResponse$Classifiers": "

The requested list of Classifier objects.

" } }, "ClassifierNameList": { "base": null, "refs": { "Crawler$Classifiers": "

A list of custom Classifiers associated with this Crawler.

", "CreateCrawlerRequest$Classifiers": "

A list of custom Classifier names that the user has registered. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

", "UpdateCrawlerRequest$Classifiers": "

A list of custom Classifier names that the user has registered. By default, all AWS classifiers are included in a crawl, but these custom classifiers always override the default classifiers for a given classification.

" } }, "CodeGenArgName": { "base": null, "refs": { "CodeGenEdge$TargetParameter": "

The target of the edge.

", "CodeGenNodeArg$Name": "

The name of the argument or property.

" } }, "CodeGenArgValue": { "base": null, "refs": { "CodeGenNodeArg$Value": "

The value of the argument or property.

" } }, "CodeGenEdge": { "base": "

Represents a directional edge in a directed acyclic graph (DAG).

", "refs": { "DagEdges$member": null } }, "CodeGenIdentifier": { "base": null, "refs": { "CodeGenEdge$Source": "

The ID of the node at which the edge starts.

", "CodeGenEdge$Target": "

The ID of the node at which the edge ends.

", "CodeGenNode$Id": "

A node identifier that is unique within the node's graph.

" } }, "CodeGenNode": { "base": "

Represents a node in a directed acyclic graph (DAG).

", "refs": { "DagNodes$member": null } }, "CodeGenNodeArg": { "base": "

An argument or property of a node.

", "refs": { "CodeGenNodeArgs$member": null } }, "CodeGenNodeArgs": { "base": null, "refs": { "CodeGenNode$Args": "

Properties of the node, in the form of name-value pairs.

", "Location$Jdbc": "

A JDBC location.

", "Location$S3": "

An Amazon S3 location.

" } }, "CodeGenNodeType": { "base": null, "refs": { "CodeGenNode$NodeType": "

The type of node this is.

" } }, "Column": { "base": "

A column in a Table.

", "refs": { "ColumnList$member": null } }, "ColumnList": { "base": null, "refs": { "StorageDescriptor$Columns": "

A list of the Columns in the table.

", "Table$PartitionKeys": "

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

", "TableInput$PartitionKeys": "

A list of columns by which the table is partitioned. Only primitive types are supported as partition keys.

" } }, "ColumnTypeString": { "base": null, "refs": { "Column$Type": "

The datatype of data in the Column.

" } }, "ColumnValueStringList": { "base": null, "refs": { "SkewedInfo$SkewedColumnValues": "

A list of values that appear so frequently as to be considered skewed.

" } }, "ColumnValuesString": { "base": null, "refs": { "ColumnValueStringList$member": null, "LocationMap$key": null, "LocationMap$value": null } }, "CommentString": { "base": null, "refs": { "Column$Comment": "

Free-form text comment.

" } }, "ConcurrentModificationException": { "base": "

Two processes are trying to modify a resource simultaneously.

", "refs": { } }, "ConcurrentRunsExceededException": { "base": "

Too many jobs are being run concurrently.

", "refs": { } }, "Condition": { "base": null, "refs": { "ConditionList$member": null } }, "ConditionList": { "base": null, "refs": { "Predicate$Conditions": "

A list of the conditions that determine when the trigger will fire.

" } }, "Connection": { "base": "

Defines a connection to a data source.

", "refs": { "ConnectionList$member": null, "GetConnectionResponse$Connection": "

The requested connection definition.

" } }, "ConnectionInput": { "base": "

A structure used to specify a connection to create or update.

", "refs": { "CreateConnectionRequest$ConnectionInput": "

A ConnectionInput object defining the connection to create.

", "UpdateConnectionRequest$ConnectionInput": "

A ConnectionInput object that redefines the connection in question.

" } }, "ConnectionList": { "base": null, "refs": { "GetConnectionsResponse$ConnectionList": "

A list of requested connection definitions.

" } }, "ConnectionName": { "base": null, "refs": { "JdbcTarget$ConnectionName": "

The name of the connection to use for the JDBC target.

" } }, "ConnectionProperties": { "base": null, "refs": { "Connection$ConnectionProperties": "

A list of key-value pairs used as parameters for this connection.

", "ConnectionInput$ConnectionProperties": "

A list of key-value pairs used as parameters for this connection.

" } }, "ConnectionPropertyKey": { "base": null, "refs": { "ConnectionProperties$key": null } }, "ConnectionType": { "base": null, "refs": { "Connection$ConnectionType": "

The type of the connection.

", "ConnectionInput$ConnectionType": "

The type of the connection.

", "GetConnectionsFilter$ConnectionType": "

The type of connections to return.

" } }, "ConnectionsList": { "base": "

Specifies the connections used by a job.

", "refs": { "CreateJobRequest$Connections": "

The connections used for this job.

", "Job$Connections": "

The connections used for this job.

", "JobUpdate$Connections": "

The connections used for this job.

" } }, "Crawler": { "base": "

Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the Data Catalog.

", "refs": { "CrawlerList$member": null, "GetCrawlerResponse$Crawler": "

The metadata for the specified Crawler.

" } }, "CrawlerList": { "base": null, "refs": { "GetCrawlersResponse$Crawlers": "

A list of Crawler metadata.

" } }, "CrawlerMetrics": { "base": "

Metrics for a specified crawler.

", "refs": { "CrawlerMetricsList$member": null } }, "CrawlerMetricsList": { "base": null, "refs": { "GetCrawlerMetricsResponse$CrawlerMetricsList": "

A list of metrics for the specified crawler.

" } }, "CrawlerNameList": { "base": null, "refs": { "GetCrawlerMetricsRequest$CrawlerNameList": "

A list of the names of crawlers about which to retrieve metrics.

" } }, "CrawlerNotRunningException": { "base": "

The specified crawler is not running.

", "refs": { } }, "CrawlerRunningException": { "base": "

The operation cannot be performed because the crawler is already running.

", "refs": { } }, "CrawlerState": { "base": null, "refs": { "Crawler$State": "

Indicates whether this Crawler is running, or whether a run is pending.

" } }, "CrawlerStoppingException": { "base": "

The specified crawler is stopping.

", "refs": { } }, "CrawlerTargets": { "base": "

Specifies crawler targets.

", "refs": { "Crawler$Targets": "

A collection of targets to crawl.

", "CreateCrawlerRequest$Targets": "

A collection of targets to crawl.

", "UpdateCrawlerRequest$Targets": "

A collection of targets to crawl.

" } }, "CreateClassifierRequest": { "base": null, "refs": { } }, "CreateClassifierResponse": { "base": null, "refs": { } }, "CreateConnectionRequest": { "base": null, "refs": { } }, "CreateConnectionResponse": { "base": null, "refs": { } }, "CreateCrawlerRequest": { "base": null, "refs": { } }, "CreateCrawlerResponse": { "base": null, "refs": { } }, "CreateDatabaseRequest": { "base": null, "refs": { } }, "CreateDatabaseResponse": { "base": null, "refs": { } }, "CreateDevEndpointRequest": { "base": null, "refs": { } }, "CreateDevEndpointResponse": { "base": null, "refs": { } }, "CreateGrokClassifierRequest": { "base": "

Specifies a Grok classifier for CreateClassifier to create.

", "refs": { "CreateClassifierRequest$GrokClassifier": "

A grok classifier to create.

" } }, "CreateJobRequest": { "base": null, "refs": { } }, "CreateJobResponse": { "base": null, "refs": { } }, "CreatePartitionRequest": { "base": null, "refs": { } }, "CreatePartitionResponse": { "base": null, "refs": { } }, "CreateScriptRequest": { "base": null, "refs": { } }, "CreateScriptResponse": { "base": null, "refs": { } }, "CreateTableRequest": { "base": null, "refs": { } }, "CreateTableResponse": { "base": null, "refs": { } }, "CreateTriggerRequest": { "base": null, "refs": { } }, "CreateTriggerResponse": { "base": null, "refs": { } }, "CreateUserDefinedFunctionRequest": { "base": null, "refs": { } }, "CreateUserDefinedFunctionResponse": { "base": null, "refs": { } }, "CronExpression": { "base": null, "refs": { "CreateCrawlerRequest$Schedule": "

A cron expression that can be used as a CloudWatch event (see CloudWatch Schedule Expression Syntax). For example, to run every day at 12:15 UTC, specify: cron(15 12 * * ? *).

", "Schedule$ScheduleExpression": "

A cron expression that can be used as a CloudWatch event to schedule something (see CloudWatch Schedule Expression Syntax). For example, to run something every day at 12:15 UTC, you would specify: cron(15 12 * * ? *).

", "UpdateCrawlerRequest$Schedule": "

A cron expression that can be used as a CloudWatch event (see CloudWatch Schedule Expression Syntax). For example, to run every day at 12:15 UTC, specify: cron(15 12 * * ? *).

", "UpdateCrawlerScheduleRequest$Schedule": "

Cron expression of the updated schedule.

" } }, "CustomPatterns": { "base": null, "refs": { "CreateGrokClassifierRequest$CustomPatterns": "

Custom grok patterns used by this classifier.

", "GrokClassifier$CustomPatterns": "

Custom grok patterns used by this classifier.

", "UpdateGrokClassifierRequest$CustomPatterns": "

Custom grok patterns used by this classifier.

" } }, "DagEdges": { "base": null, "refs": { "CreateScriptRequest$DagEdges": "

A list of the edges in the DAG.

", "GetDataflowGraphResponse$DagEdges": "

A list of the edges in the resulting DAG.

" } }, "DagNodes": { "base": null, "refs": { "CreateScriptRequest$DagNodes": "

A list of the nodes in the DAG.

", "GetDataflowGraphResponse$DagNodes": "

A list of the nodes in the resulting DAG.

" } }, "Database": { "base": "

The Database object represents a logical grouping of tables that may reside in a Hive metastore or an RDBMS.

", "refs": { "DatabaseList$member": null, "GetDatabaseResponse$Database": "

The definition of the specified database in the catalog.

" } }, "DatabaseInput": { "base": "

The structure used to create or update a database.

", "refs": { "CreateDatabaseRequest$DatabaseInput": "

A DatabaseInput object defining the metadata database to create in the catalog.

", "UpdateDatabaseRequest$DatabaseInput": "

A DatabaseInput object specifying the new definition of the metadata database in the catalog.

" } }, "DatabaseList": { "base": null, "refs": { "GetDatabasesResponse$DatabaseList": "

A list of Database objects from the specified catalog.

" } }, "DatabaseName": { "base": null, "refs": { "Crawler$DatabaseName": "

The Database where this Crawler's output should be stored.

", "CreateCrawlerRequest$DatabaseName": "

The Glue Database where results will be stored, such as: arn:aws:daylight:us-east-1::database/sometable/*.

", "UpdateCrawlerRequest$DatabaseName": "

The Glue Database where results will be stored, such as: arn:aws:daylight:us-east-1::database/sometable/*.

" } }, "DeleteBehavior": { "base": null, "refs": { "SchemaChangePolicy$DeleteBehavior": "

The deletion behavior.

" } }, "DeleteClassifierRequest": { "base": null, "refs": { } }, "DeleteClassifierResponse": { "base": null, "refs": { } }, "DeleteConnectionNameList": { "base": null, "refs": { "BatchDeleteConnectionRequest$ConnectionNameList": "

A list of names of the connections to delete.

" } }, "DeleteConnectionRequest": { "base": null, "refs": { } }, "DeleteConnectionResponse": { "base": null, "refs": { } }, "DeleteCrawlerRequest": { "base": null, "refs": { } }, "DeleteCrawlerResponse": { "base": null, "refs": { } }, "DeleteDatabaseRequest": { "base": null, "refs": { } }, "DeleteDatabaseResponse": { "base": null, "refs": { } }, "DeleteDevEndpointRequest": { "base": null, "refs": { } }, "DeleteDevEndpointResponse": { "base": null, "refs": { } }, "DeleteJobRequest": { "base": null, "refs": { } }, "DeleteJobResponse": { "base": null, "refs": { } }, "DeletePartitionRequest": { "base": null, "refs": { } }, "DeletePartitionResponse": { "base": null, "refs": { } }, "DeleteTableRequest": { "base": null, "refs": { } }, "DeleteTableResponse": { "base": null, "refs": { } }, "DeleteTriggerRequest": { "base": null, "refs": { } }, "DeleteTriggerResponse": { "base": null, "refs": { } }, "DeleteUserDefinedFunctionRequest": { "base": null, "refs": { } }, "DeleteUserDefinedFunctionResponse": { "base": null, "refs": { } }, "DescriptionString": { "base": null, "refs": { "Connection$Description": "

Description of the connection.

", "ConnectionInput$Description": "

Description of the connection.

", "Crawler$Description": "

A description of this Crawler and where it should be used.

", "CreateCrawlerRequest$Description": "

A description of the new Crawler.

", "CreateJobRequest$Description": "

Description of the job.

", "CreateTriggerRequest$Description": "

A description of the new trigger.

", "Database$Description": "

Description of the database.

", "DatabaseInput$Description": "

Description of the database.

", "ErrorDetail$ErrorMessage": "

A message describing the error.

", "Job$Description": "

Description of this job.

", "JobUpdate$Description": "

Description of the job.

", "LastCrawlInfo$ErrorMessage": "

Error information about the last crawl, if an error occurred.

", "Table$Description": "

Description of the table.

", "TableInput$Description": "

Description of the table.

", "Trigger$Description": "

A description of this trigger.

", "TriggerUpdate$Description": "

A description of this trigger.

" } }, "DescriptionStringRemovable": { "base": null, "refs": { "UpdateCrawlerRequest$Description": "

A description of the new Crawler.

" } }, "DevEndpoint": { "base": "

A development endpoint where a developer can remotely debug ETL scripts.

", "refs": { "DevEndpointList$member": null, "GetDevEndpointResponse$DevEndpoint": "

A DevEndpoint definition.

" } }, "DevEndpointCustomLibraries": { "base": "

Custom libraries to be loaded into a DevEndpoint.

", "refs": { "UpdateDevEndpointRequest$CustomLibraries": "

Custom Python or Java libraries to be loaded into the DevEndpoint.

" } }, "DevEndpointList": { "base": null, "refs": { "GetDevEndpointsResponse$DevEndpoints": "

A list of DevEndpoint definitions.

" } }, "EntityNotFoundException": { "base": "

A specified entity does not exist.

", "refs": { } }, "ErrorByName": { "base": null, "refs": { "BatchDeleteConnectionResponse$Errors": "

A map of the names of connections that were not successfully deleted to error details.

" } }, "ErrorDetail": { "base": "

Contains details about an error.

", "refs": { "ErrorByName$value": null, "PartitionError$ErrorDetail": "

Details about the partition error.

", "TableError$ErrorDetail": "

Detail about the error.

" } }, "ErrorString": { "base": null, "refs": { "JobRun$ErrorMessage": "

An error message associated with this job run.

" } }, "ExecutionProperty": { "base": "

An execution property of a job.

", "refs": { "CreateJobRequest$ExecutionProperty": "

An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.

", "Job$ExecutionProperty": "

An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.

", "JobUpdate$ExecutionProperty": "

An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.

" } }, "FieldType": { "base": null, "refs": { "MappingEntry$SourceType": "

The source type.

", "MappingEntry$TargetType": "

The target type.

" } }, "FilterString": { "base": null, "refs": { "GetTablesRequest$Expression": "

A regular expression pattern. If present, only those tables whose names match the pattern are returned.

" } }, "FormatString": { "base": null, "refs": { "StorageDescriptor$InputFormat": "

The input format: SequenceFileInputFormat (binary), or TextInputFormat, or a custom format.

", "StorageDescriptor$OutputFormat": "

The output format: SequenceFileOutputFormat (binary), or IgnoreKeyTextOutputFormat, or a custom format.

" } }, "GenericMap": { "base": null, "refs": { "Action$Arguments": null, "CreateJobRequest$DefaultArguments": "

The default parameters for this job.

", "Job$DefaultArguments": "

The default parameters for this job.

", "JobRun$Arguments": "

The job arguments associated with this run.

", "JobUpdate$DefaultArguments": "

The default parameters for this job.

", "StartJobRunRequest$Arguments": "

Specific arguments for this job run.

" } }, "GenericString": { "base": null, "refs": { "CreateDevEndpointRequest$EndpointName": "

The name to be assigned to the new DevEndpoint.

", "CreateDevEndpointRequest$SubnetId": "

The subnet ID for the new DevEndpoint to use.

", "CreateDevEndpointRequest$PublicKey": "

The public key to use for authentication.

", "CreateDevEndpointRequest$ExtraPythonLibsS3Path": "

Path to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint.

", "CreateDevEndpointRequest$ExtraJarsS3Path": "

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

", "CreateDevEndpointResponse$EndpointName": "

The name assigned to the new DevEndpoint.

", "CreateDevEndpointResponse$Status": "

The current status of the new DevEndpoint.

", "CreateDevEndpointResponse$SubnetId": "

The subnet ID assigned to the new DevEndpoint.

", "CreateDevEndpointResponse$YarnEndpointAddress": "

The address of the YARN endpoint used by this DevEndpoint.

", "CreateDevEndpointResponse$AvailabilityZone": "

The AWS availability zone where this DevEndpoint is located.

", "CreateDevEndpointResponse$VpcId": "

The ID of the VPC used by this DevEndpoint.

", "CreateDevEndpointResponse$ExtraPythonLibsS3Path": "

Path to one or more Python libraries in an S3 bucket that will be loaded in your DevEndpoint.

", "CreateDevEndpointResponse$ExtraJarsS3Path": "

Path to one or more Java Jars in an S3 bucket that will be loaded in your DevEndpoint.

", "CreateDevEndpointResponse$FailureReason": "

The reason for a current failure in this DevEndpoint.

", "CreateTriggerRequest$Schedule": "

A cron schedule expression for the new trigger.

", "DeleteDevEndpointRequest$EndpointName": "

The name of the DevEndpoint.

", "DevEndpoint$EndpointName": "

The name of the DevEndpoint.

", "DevEndpoint$SubnetId": "

The subnet ID for this DevEndpoint.

", "DevEndpoint$YarnEndpointAddress": "

The YARN endpoint address used by this DevEndpoint.

", "DevEndpoint$PublicAddress": "

The public address used by this DevEndpoint.

", "DevEndpoint$Status": "

The current status of this DevEndpoint.

", "DevEndpoint$AvailabilityZone": "

The AWS availability zone where this DevEndpoint is located.

", "DevEndpoint$VpcId": "

The ID of the virtual private cloud (VPC) used by this DevEndpoint.

", "DevEndpoint$ExtraPythonLibsS3Path": "

Path to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint.

", "DevEndpoint$ExtraJarsS3Path": "

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

", "DevEndpoint$FailureReason": "

The reason for a current failure in this DevEndpoint.

", "DevEndpoint$LastUpdateStatus": "

The status of the last update.

", "DevEndpoint$PublicKey": "

The public key to be used by this DevEndpoint for authentication.

", "DevEndpointCustomLibraries$ExtraPythonLibsS3Path": "

Path to one or more Python libraries in an S3 bucket that should be loaded in your DevEndpoint.

", "DevEndpointCustomLibraries$ExtraJarsS3Path": "

Path to one or more Java Jars in an S3 bucket that should be loaded in your DevEndpoint.

", "GenericMap$key": null, "GenericMap$value": null, "GetDevEndpointRequest$EndpointName": "

Name of the DevEndpoint for which to retrieve information.

", "GetDevEndpointsRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetDevEndpointsResponse$NextToken": "

A continuation token, if not all DevEndpoint definitions have yet been returned.

", "GetJobRunsRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetJobRunsResponse$NextToken": "

A continuation token, if not all requested job runs have been returned.

", "GetJobsRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetJobsResponse$NextToken": "

A continuation token, if not all jobs have yet been returned.

", "GetTriggersRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetTriggersResponse$NextToken": "

A continuation token, if not all the requested triggers have yet been returned.

", "JobCommand$Name": "

The name of this job command.

", "StringList$member": null, "Trigger$Schedule": "

A cron schedule expression.

", "TriggerUpdate$Schedule": "

A cron expression specifying the schedule.

", "UpdateDevEndpointRequest$EndpointName": "

The name of the DevEndpoint to be updated.

", "UpdateDevEndpointRequest$PublicKey": "

The public key for the DevEndpoint to use.

" } }, "GetCatalogImportStatusRequest": { "base": null, "refs": { } }, "GetCatalogImportStatusResponse": { "base": null, "refs": { } }, "GetClassifierRequest": { "base": null, "refs": { } }, "GetClassifierResponse": { "base": null, "refs": { } }, "GetClassifiersRequest": { "base": null, "refs": { } }, "GetClassifiersResponse": { "base": null, "refs": { } }, "GetConnectionRequest": { "base": null, "refs": { } }, "GetConnectionResponse": { "base": null, "refs": { } }, "GetConnectionsFilter": { "base": "

Filters the connection definitions returned by the GetConnections API.

", "refs": { "GetConnectionsRequest$Filter": "

A filter that controls which connections will be returned.

" } }, "GetConnectionsRequest": { "base": null, "refs": { } }, "GetConnectionsResponse": { "base": null, "refs": { } }, "GetCrawlerMetricsRequest": { "base": null, "refs": { } }, "GetCrawlerMetricsResponse": { "base": null, "refs": { } }, "GetCrawlerRequest": { "base": null, "refs": { } }, "GetCrawlerResponse": { "base": null, "refs": { } }, "GetCrawlersRequest": { "base": null, "refs": { } }, "GetCrawlersResponse": { "base": null, "refs": { } }, "GetDatabaseRequest": { "base": null, "refs": { } }, "GetDatabaseResponse": { "base": null, "refs": { } }, "GetDatabasesRequest": { "base": null, "refs": { } }, "GetDatabasesResponse": { "base": null, "refs": { } }, "GetDataflowGraphRequest": { "base": null, "refs": { } }, "GetDataflowGraphResponse": { "base": null, "refs": { } }, "GetDevEndpointRequest": { "base": null, "refs": { } }, "GetDevEndpointResponse": { "base": null, "refs": { } }, "GetDevEndpointsRequest": { "base": null, "refs": { } }, "GetDevEndpointsResponse": { "base": null, "refs": { } }, "GetJobRequest": { "base": null, "refs": { } }, "GetJobResponse": { "base": null, "refs": { } }, "GetJobRunRequest": { "base": null, "refs": { } }, "GetJobRunResponse": { "base": null, "refs": { } }, "GetJobRunsRequest": { "base": null, "refs": { } }, "GetJobRunsResponse": { "base": null, "refs": { } }, "GetJobsRequest": { "base": null, "refs": { } }, "GetJobsResponse": { "base": null, "refs": { } }, "GetMappingRequest": { "base": null, "refs": { } }, "GetMappingResponse": { "base": null, "refs": { } }, "GetPartitionRequest": { "base": null, "refs": { } }, "GetPartitionResponse": { "base": null, "refs": { } }, "GetPartitionsRequest": { "base": null, "refs": { } }, "GetPartitionsResponse": { "base": null, "refs": { } }, "GetPlanRequest": { "base": null, "refs": { } }, "GetPlanResponse": { "base": null, "refs": { } }, "GetTableRequest": { "base": null, "refs": { } }, "GetTableResponse": { "base": null, "refs": { } }, "GetTableVersionsList": { "base": null, "refs": { "GetTableVersionsResponse$TableVersions": "

A list of strings identifying available versions of the specified table.

" } }, "GetTableVersionsRequest": { "base": null, "refs": { } }, "GetTableVersionsResponse": { "base": null, "refs": { } }, "GetTablesRequest": { "base": null, "refs": { } }, "GetTablesResponse": { "base": null, "refs": { } }, "GetTriggerRequest": { "base": null, "refs": { } }, "GetTriggerResponse": { "base": null, "refs": { } }, "GetTriggersRequest": { "base": null, "refs": { } }, "GetTriggersResponse": { "base": null, "refs": { } }, "GetUserDefinedFunctionRequest": { "base": null, "refs": { } }, "GetUserDefinedFunctionResponse": { "base": null, "refs": { } }, "GetUserDefinedFunctionsRequest": { "base": null, "refs": { } }, "GetUserDefinedFunctionsResponse": { "base": null, "refs": { } }, "GrokClassifier": { "base": "

A classifier that uses grok.

", "refs": { "Classifier$GrokClassifier": "

A GrokClassifier object.

" } }, "GrokPattern": { "base": null, "refs": { "CreateGrokClassifierRequest$GrokPattern": "

The grok pattern used by this classifier.

", "GrokClassifier$GrokPattern": "

The grok pattern used by this classifier.

", "UpdateGrokClassifierRequest$GrokPattern": "

The grok pattern used by this classifier.

" } }, "IdString": { "base": null, "refs": { "GetJobRunRequest$RunId": "

The ID of the job run.

", "JobRun$Id": "

The ID of this job run.

", "JobRun$PreviousRunId": "

The ID of the previous run of this job.

", "Predecessor$RunId": "

The job-run ID of the predecessor job run.

", "StartJobRunRequest$JobRunId": "

The ID of the job run to start.

", "StartJobRunResponse$JobRunId": "

The ID assigned to this job run.

", "Trigger$Id": "

The trigger ID.

" } }, "IdempotentParameterMismatchException": { "base": "

The same unique identifier was associated with two different records.

", "refs": { } }, "ImportCatalogToGlueRequest": { "base": null, "refs": { } }, "ImportCatalogToGlueResponse": { "base": null, "refs": { } }, "Integer": { "base": null, "refs": { "CodeGenNode$LineNumber": "

The line number of the node.

", "StorageDescriptor$NumberOfBuckets": "

Must be specified if the table contains any dimension columns.

" } }, "IntegerFlag": { "base": null, "refs": { "Order$SortOrder": "

Indicates that the column is sorted in ascending order (== 1) or in descending order (== 0).

" } }, "IntegerValue": { "base": null, "refs": { "CreateDevEndpointRequest$NumberOfNodes": "

The number of nodes to use.

", "CreateDevEndpointResponse$NumberOfNodes": "

The number of nodes in this DevEndpoint.

", "CreateJobRequest$AllocatedCapacity": "

The number of capacity units allocated to this job.

", "DevEndpoint$NumberOfNodes": "

The number of nodes used by this DevEndpoint.

", "Job$AllocatedCapacity": "

The number of capacity units allocated to this job.

", "JobBookmarkEntry$Version": "

Version of the job.

", "JobBookmarkEntry$Run": "

The run ID number.

", "JobBookmarkEntry$Attempt": "

The attempt ID number.

", "JobRun$AllocatedCapacity": "

The amount of infrastructure capacity allocated to this job run.

", "JobUpdate$AllocatedCapacity": "

The number of capacity units allocated to this job.

", "StartJobRunRequest$AllocatedCapacity": "

The infrastructure capacity to allocate to this job.

" } }, "InternalServiceException": { "base": "

An internal service error occurred.

", "refs": { } }, "InvalidInputException": { "base": "

The input provided was not valid.

", "refs": { } }, "JdbcTarget": { "base": "

Specifies a JDBC target for a crawl.

", "refs": { "JdbcTargetList$member": null } }, "JdbcTargetList": { "base": null, "refs": { "CrawlerTargets$JdbcTargets": "

Specifies JDBC targets.

" } }, "Job": { "base": "

Specifies a job in the Data Catalog.

", "refs": { "GetJobResponse$Job": "

The requested job definition.

", "JobList$member": null } }, "JobBookmarkEntry": { "base": "

Defines a point at which a job can resume processing.

", "refs": { "ResetJobBookmarkResponse$JobBookmarkEntry": "

The reset bookmark entry.

" } }, "JobCommand": { "base": "

Specifies code that executes a job.

", "refs": { "CreateJobRequest$Command": "

The JobCommand that executes this job.

", "Job$Command": "

The JobCommand that executes this job.

", "JobUpdate$Command": "

The JobCommand that executes this job.

" } }, "JobList": { "base": null, "refs": { "GetJobsResponse$Jobs": "

A list of jobs.

" } }, "JobName": { "base": null, "refs": { "JobBookmarkEntry$JobName": "

Name of the job in question.

", "ResetJobBookmarkRequest$JobName": "

The name of the job in question.

" } }, "JobRun": { "base": "

Contains information about a job run.

", "refs": { "GetJobRunResponse$JobRun": "

The requested job-run metadata.

", "JobRunList$member": null } }, "JobRunList": { "base": null, "refs": { "GetJobRunsResponse$JobRuns": "

A list of job-run metadata objects.

" } }, "JobRunState": { "base": null, "refs": { "Condition$State": null, "JobRun$JobRunState": "

The current state of the job run.

" } }, "JobUpdate": { "base": "

Specifies information used to update an existing job.

", "refs": { "UpdateJobRequest$JobUpdate": "

Specifies the values with which to update the job.

" } }, "JsonValue": { "base": null, "refs": { "JobBookmarkEntry$JobBookmark": "

The bookmark itself.

" } }, "KeyString": { "base": null, "refs": { "ParametersMap$key": null } }, "LastCrawlInfo": { "base": "

Status and error information about the most recent crawl.

", "refs": { "Crawler$LastCrawl": "

The status of the last crawl, and potentially error information if an error occurred.

" } }, "LastCrawlStatus": { "base": null, "refs": { "LastCrawlInfo$Status": "

Status of the last crawl.

" } }, "Location": { "base": "

The location of resources.

", "refs": { "GetMappingRequest$Location": "

Parameters for the mapping.

", "GetPlanRequest$Location": "

Parameters for the mapping.

" } }, "LocationMap": { "base": null, "refs": { "SkewedInfo$SkewedColumnValueLocationMaps": "

A mapping of skewed values to the columns that contain them.

" } }, "LocationString": { "base": null, "refs": { "StorageDescriptor$Location": "

The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.

" } }, "LogGroup": { "base": null, "refs": { "LastCrawlInfo$LogGroup": "

The log group for the last crawl.

" } }, "LogStream": { "base": null, "refs": { "LastCrawlInfo$LogStream": "

The log stream for the last crawl.

" } }, "Logical": { "base": null, "refs": { "Predicate$Logical": "

Currently \"OR\" is not supported.

" } }, "LogicalOperator": { "base": null, "refs": { "Condition$LogicalOperator": null } }, "MappingEntry": { "base": "

Defines a mapping.

", "refs": { "MappingList$member": null } }, "MappingList": { "base": null, "refs": { "GetMappingResponse$Mapping": "

A list of mappings to the specified targets.

", "GetPlanRequest$Mapping": "

The list of mappings from a source table to target tables.

" } }, "MatchCriteria": { "base": null, "refs": { "Connection$MatchCriteria": "

A list of criteria that can be used in selecting this connection.

", "ConnectionInput$MatchCriteria": "

A list of criteria that can be used in selecting this connection.

", "GetConnectionsFilter$MatchCriteria": "

A criteria string that must match the criteria recorded in the connection definition for that connection definition to be returned.

" } }, "MaxConcurrentRuns": { "base": null, "refs": { "ExecutionProperty$MaxConcurrentRuns": "

The maximum number of concurrent runs allowed for a job.

" } }, "MaxRetries": { "base": null, "refs": { "CreateJobRequest$MaxRetries": "

The maximum number of times to retry this job if it fails.

", "Job$MaxRetries": "

The maximum number of times to retry this job if it fails.

", "JobUpdate$MaxRetries": "

The maximum number of times to retry this job if it fails.

" } }, "MessagePrefix": { "base": null, "refs": { "LastCrawlInfo$MessagePrefix": "

The prefix for a message about this crawl.

" } }, "MessageString": { "base": null, "refs": { "AccessDeniedException$Message": "

A message describing the problem.

", "AlreadyExistsException$Message": "

A message describing the problem.

", "ConcurrentModificationException$Message": "

A message describing the problem.

", "ConcurrentRunsExceededException$Message": "

A message describing the problem.

", "CrawlerNotRunningException$Message": "

A message describing the problem.

", "CrawlerRunningException$Message": "

A message describing the problem.

", "CrawlerStoppingException$Message": "

A message describing the problem.

", "EntityNotFoundException$Message": "

A message describing the problem.

", "IdempotentParameterMismatchException$Message": "

A message describing the problem.

", "InternalServiceException$Message": "

A message describing the problem.

", "InvalidInputException$Message": "

A message describing the problem.

", "NoScheduleException$Message": "

A message describing the problem.

", "OperationTimeoutException$Message": "

A message describing the problem.

", "ResourceNumberLimitExceededException$Message": "

A message describing the problem.

", "SchedulerNotRunningException$Message": "

A message describing the problem.

", "SchedulerRunningException$Message": "

A message describing the problem.

", "SchedulerTransitioningException$Message": "

A message describing the problem.

", "ValidationException$Message": "

A message describing the problem.

", "VersionMismatchException$Message": "

A message describing the problem.

" } }, "MillisecondsCount": { "base": null, "refs": { "Crawler$CrawlElapsedTime": "

If this Crawler is running, contains the total time elapsed since the last crawl began.

" } }, "NameString": { "base": null, "refs": { "Action$JobName": null, "BatchCreatePartitionRequest$DatabaseName": "

The name of the metadata database in which the partition is to be created.

", "BatchCreatePartitionRequest$TableName": "

The name of the metadata table in which the partition is to be created.

", "BatchDeletePartitionRequest$DatabaseName": "

The name of the catalog database in which the table in question resides.

", "BatchDeletePartitionRequest$TableName": "

The name of the table where the partitions to be deleted are located.

", "BatchDeleteTableNameList$member": null, "BatchDeleteTableRequest$DatabaseName": "

The name of the catalog database where the tables to delete reside.

", "BatchGetPartitionRequest$DatabaseName": "

The name of the catalog database where the partitions reside.

", "BatchGetPartitionRequest$TableName": "

The name of the partitions' table.

", "CatalogEntry$DatabaseName": "

The database in which the table metadata resides.

", "CatalogEntry$TableName": "

The name of the table in question.

", "CatalogImportStatus$ImportedBy": "

The name of the person who initiated the migration.

", "ClassifierNameList$member": null, "Column$Name": "

The name of the Column.

", "Condition$JobName": null, "Connection$Name": "

The name of the connection definition.

", "Connection$LastUpdatedBy": "

The user, group or role that last updated this connection definition.

", "ConnectionInput$Name": "

The name of the connection.

", "Crawler$Name": "

The Crawler name.

", "CrawlerMetrics$CrawlerName": "

The name of the crawler.

", "CrawlerNameList$member": null, "CreateCrawlerRequest$Name": "

Name of the new Crawler.

", "CreateGrokClassifierRequest$Name": "

The name of the new Classifier.

", "CreateJobRequest$Name": "

The name you assign to this job.

", "CreateJobResponse$Name": "

The unique name of the new job that has been created.

", "CreatePartitionRequest$DatabaseName": "

The name of the metadata database in which the partition is to be created.

", "CreatePartitionRequest$TableName": "

The name of the metadata table in which the partition is to be created.

", "CreateTableRequest$DatabaseName": "

The catalog database in which to create the new table.

", "CreateTriggerRequest$Name": "

The name to assign to the new trigger.

", "CreateTriggerResponse$Name": "

The name assigned to the new trigger.

", "CreateUserDefinedFunctionRequest$DatabaseName": "

The name of the catalog database in which to create the function.

", "Database$Name": "

Name of the database.

", "DatabaseInput$Name": "

Name of the database.

", "DeleteClassifierRequest$Name": "

Name of the Classifier to remove.

", "DeleteConnectionNameList$member": null, "DeleteConnectionRequest$ConnectionName": "

The name of the connection to delete.

", "DeleteCrawlerRequest$Name": "

Name of the Crawler to remove.

", "DeleteDatabaseRequest$Name": "

The name of the Database to delete.

", "DeleteJobRequest$JobName": "

The name of the job to delete.

", "DeleteJobResponse$JobName": "

The name of the job that was deleted.

", "DeletePartitionRequest$DatabaseName": "

The name of the catalog database in which the table in question resides.

", "DeletePartitionRequest$TableName": "

The name of the table where the partition to be deleted is located.

", "DeleteTableRequest$DatabaseName": "

The name of the catalog database in which the table resides.

", "DeleteTableRequest$Name": "

The name of the table to be deleted.

", "DeleteTriggerRequest$Name": "

The name of the trigger to delete.

", "DeleteTriggerResponse$Name": "

The name of the trigger that was deleted.

", "DeleteUserDefinedFunctionRequest$DatabaseName": "

The name of the catalog database where the function is located.

", "DeleteUserDefinedFunctionRequest$FunctionName": "

The name of the function definition to be deleted.

", "ErrorByName$key": null, "ErrorDetail$ErrorCode": "

The code associated with this error.

", "GetClassifierRequest$Name": "

Name of the Classifier to retrieve.

", "GetConnectionRequest$Name": "

The name of the connection definition to retrieve.

", "GetCrawlerRequest$Name": "

Name of the Crawler to retrieve metadata for.

", "GetDatabaseRequest$Name": "

The name of the database to retrieve.

", "GetJobRequest$JobName": "

The name of the job to retrieve.

", "GetJobRunRequest$JobName": "

Name of the job being run.

", "GetJobRunsRequest$JobName": "

The name of the job for which to retrieve all job runs.

", "GetPartitionRequest$DatabaseName": "

The name of the catalog database where the partition resides.

", "GetPartitionRequest$TableName": "

The name of the partition's table.

", "GetPartitionsRequest$DatabaseName": "

The name of the catalog database where the partitions reside.

", "GetPartitionsRequest$TableName": "

The name of the partitions' table.

", "GetTableRequest$DatabaseName": "

The name of the database in the catalog in which the table resides.

", "GetTableRequest$Name": "

The name of the table for which to retrieve the definition.

", "GetTableVersionsRequest$DatabaseName": "

The database in the catalog in which the table resides.

", "GetTableVersionsRequest$TableName": "

The name of the table.

", "GetTablesRequest$DatabaseName": "

The database in the catalog whose tables to list.

", "GetTriggerRequest$Name": "

The name of the trigger to retrieve.

", "GetTriggersRequest$DependentJobName": "

The name of the job for which to retrieve triggers.

", "GetUserDefinedFunctionRequest$DatabaseName": "

The name of the catalog database where the function is located.

", "GetUserDefinedFunctionRequest$FunctionName": "

The name of the function.

", "GetUserDefinedFunctionsRequest$DatabaseName": "

The name of the catalog database where the functions are located.

", "GetUserDefinedFunctionsRequest$Pattern": "

An optional function-name pattern string that filters the function definitions returned.

", "GrokClassifier$Name": "

The name of the classifier.

", "Job$Name": "

The name you assign to this job.

", "JobRun$TriggerName": "

The name of the trigger for this job run.

", "JobRun$JobName": "

The name of the job being run.

", "MatchCriteria$member": null, "NameStringList$member": null, "Order$Column": "

The name of the column.

", "Partition$DatabaseName": "

The name of the catalog database where the table in question is located.

", "Partition$TableName": "

The name of the table in question.

", "PhysicalConnectionRequirements$SubnetId": "

The subnet ID used by the connection.

", "PhysicalConnectionRequirements$AvailabilityZone": "

The connection's availability zone.

", "Predecessor$JobName": "

The name of the predecessor job.

", "SecurityGroupIdList$member": null, "SerDeInfo$Name": "

Name of the SerDe.

", "SerDeInfo$SerializationLibrary": "

Usually the class that implements the SerDe. An example is: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.

", "StartCrawlerRequest$Name": "

Name of the Crawler to start.

", "StartCrawlerScheduleRequest$CrawlerName": "

Name of the crawler to schedule.

", "StartJobRunRequest$JobName": "

The name of the job to start.

", "StartTriggerRequest$Name": "

The name of the trigger to start.

", "StartTriggerResponse$Name": "

The name of the trigger that was started.

", "StopCrawlerRequest$Name": "

Name of the Crawler to stop.

", "StopCrawlerScheduleRequest$CrawlerName": "

Name of the crawler whose schedule state to set.

", "StopTriggerRequest$Name": "

The name of the trigger to stop.

", "StopTriggerResponse$Name": "

The name of the trigger that was stopped.

", "Table$Name": "

Name of the table.

", "Table$DatabaseName": "

Name of the metadata database where the table metadata resides.

", "Table$Owner": "

Owner of the table.

", "Table$CreatedBy": "

Person or entity who created the table.

", "TableError$TableName": "

Name of the table.

", "TableInput$Name": "

Name of the table.

", "TableInput$Owner": "

Owner of the table.

", "Trigger$Name": "

Name of the trigger.

", "TriggerUpdate$Name": "

The name of the trigger.

", "UpdateConnectionRequest$Name": "

The name of the connection definition to update.

", "UpdateCrawlerRequest$Name": "

Name of the Crawler to update.

", "UpdateCrawlerScheduleRequest$CrawlerName": "

Name of the crawler whose schedule to update.

", "UpdateDatabaseRequest$Name": "

The name of the metadata database to update in the catalog.

", "UpdateGrokClassifierRequest$Name": "

The name of the GrokClassifier.

", "UpdateJobRequest$JobName": "

Name of the job definition to update.

", "UpdateJobResponse$JobName": "

Returns the name of the updated job.

", "UpdatePartitionRequest$DatabaseName": "

The name of the catalog database in which the table in question resides.

", "UpdatePartitionRequest$TableName": "

The name of the table where the partition to be updated is located.

", "UpdateTableRequest$DatabaseName": "

The name of the catalog database in which the table resides.

", "UpdateTriggerRequest$Name": "

The name of the trigger to update.

", "UpdateUserDefinedFunctionRequest$DatabaseName": "

The name of the catalog database where the function to be updated is located.

", "UpdateUserDefinedFunctionRequest$FunctionName": "

The name of the function.

", "UserDefinedFunction$FunctionName": "

The name of the function.

", "UserDefinedFunction$ClassName": "

The Java class that contains the function code.

", "UserDefinedFunction$OwnerName": "

The owner of the function.

", "UserDefinedFunctionInput$FunctionName": "

The name of the function.

", "UserDefinedFunctionInput$ClassName": "

The Java class that contains the function code.

", "UserDefinedFunctionInput$OwnerName": "

The owner of the function.

" } }, "NameStringList": { "base": null, "refs": { "BatchDeleteConnectionResponse$Succeeded": "

A list of names of the connection definitions that were successfully deleted.

", "SkewedInfo$SkewedColumnNames": "

A list of names of columns that contain skewed values.

", "StorageDescriptor$BucketColumns": "

A list of reducer grouping columns, clustering columns, and bucketing columns in the table.

" } }, "NoScheduleException": { "base": "

There is no applicable schedule.

", "refs": { } }, "NonNegativeDouble": { "base": null, "refs": { "CrawlerMetrics$TimeLeftSeconds": "

The estimated time left to complete a running crawl.

", "CrawlerMetrics$LastRuntimeSeconds": "

The duration of the crawler's most recent run, in seconds.

", "CrawlerMetrics$MedianRuntimeSeconds": "

The median duration of this crawler's runs, in seconds.

" } }, "NonNegativeInteger": { "base": null, "refs": { "CrawlerMetrics$TablesCreated": "

The number of tables created by this crawler.

", "CrawlerMetrics$TablesUpdated": "

The number of tables updated by this crawler.

", "CrawlerMetrics$TablesDeleted": "

The number of tables deleted by this crawler.

", "Segment$SegmentNumber": "

The zero-based index number of this segment. For example, if the total number of segments is 4, SegmentNumber values will range from zero through three.

", "Table$Retention": "

Retention time for this table.

", "TableInput$Retention": "

Retention time for this table.

" } }, "OperationTimeoutException": { "base": "

The operation timed out.

", "refs": { } }, "Order": { "base": "

Specifies the sort order of a sorted column.

", "refs": { "OrderList$member": null } }, "OrderList": { "base": null, "refs": { "StorageDescriptor$SortColumns": "

A list specifying the sort order of each bucket in the table.

" } }, "PageSize": { "base": null, "refs": { "GetClassifiersRequest$MaxResults": "

Size of the list to return (optional).

", "GetConnectionsRequest$MaxResults": "

The maximum number of connections to return in one response.

", "GetCrawlerMetricsRequest$MaxResults": "

The maximum size of a list to return.

", "GetCrawlersRequest$MaxResults": "

The number of Crawlers to return on each call.

", "GetDatabasesRequest$MaxResults": "

The maximum number of databases to return in one response.

", "GetDevEndpointsRequest$MaxResults": "

The maximum number of DevEndpoints to return in one response.

", "GetJobRunsRequest$MaxResults": "

The maximum number of job runs to return in one response.

", "GetJobsRequest$MaxResults": "

The maximum number of jobs to return in one response.

", "GetPartitionsRequest$MaxResults": "

The maximum number of partitions to return in a single response.

", "GetTableVersionsRequest$MaxResults": "

The maximum number of table versions to return in one response.

", "GetTablesRequest$MaxResults": "

The maximum number of tables to return in a single response.

", "GetTriggersRequest$MaxResults": "

The maximum number of triggers to return in one response.

", "GetUserDefinedFunctionsRequest$MaxResults": "

The maximum number of functions to return in one response.

" } }, "ParametersMap": { "base": null, "refs": { "Database$Parameters": "

A list of key-value pairs that define parameters and properties of the database.

", "DatabaseInput$Parameters": "

A list of key-value pairs that define parameters and properties of the database.

", "Partition$Parameters": "

Partition parameters, in the form of a list of key-value pairs.

", "PartitionInput$Parameters": "

Partition parameters, in the form of a list of key-value pairs.

", "SerDeInfo$Parameters": "

A list of initialization parameters for the SerDe, in key-value form.

", "StorageDescriptor$Parameters": "

User-supplied properties in key-value form.

", "Table$Parameters": "

Properties associated with this table, as a list of key-value pairs.

", "TableInput$Parameters": "

Properties associated with this table, as a list of key-value pairs.

" } }, "ParametersMapValue": { "base": null, "refs": { "ParametersMap$value": null } }, "Partition": { "base": "

Represents a slice of table data.

", "refs": { "GetPartitionResponse$Partition": "

The requested information, in the form of a Partition object.

", "PartitionList$member": null } }, "PartitionError": { "base": "

Contains information about a partition error.

", "refs": { "PartitionErrors$member": null } }, "PartitionErrors": { "base": null, "refs": { "BatchCreatePartitionResponse$Errors": "

Errors encountered when trying to create the requested partitions.

", "BatchDeletePartitionResponse$Errors": "

Errors encountered when trying to delete the requested partitions.

" } }, "PartitionInput": { "base": "

The structure used to create and update a partition.

", "refs": { "CreatePartitionRequest$PartitionInput": "

A PartitionInput structure defining the partition to be created.

", "PartitionInputList$member": null, "UpdatePartitionRequest$PartitionInput": "

The new partition object with which to update the partition.

" } }, "PartitionInputList": { "base": null, "refs": { "BatchCreatePartitionRequest$PartitionInputList": "

A list of PartitionInput structures that define the partitions to be created.

" } }, "PartitionList": { "base": null, "refs": { "BatchGetPartitionResponse$Partitions": "

A list of the requested partitions.

", "GetPartitionsResponse$Partitions": "

A list of requested partitions.

" } }, "PartitionValueList": { "base": null, "refs": { "BatchDeletePartitionValueList$member": null, "BatchGetPartitionValueList$member": null } }, "Path": { "base": null, "refs": { "JdbcTarget$Path": "

The path of the JDBC target.

", "PathList$member": null, "S3Target$Path": "

The path to the S3 target.

" } }, "PathList": { "base": null, "refs": { "JdbcTarget$Exclusions": "

A list of items to exclude from the crawl.

", "S3Target$Exclusions": "

A list of S3 objects to exclude from the crawl.
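For example, a glob-style pattern such as **.tmp might be used to skip temporary files; the exact pattern syntax is described in the crawler documentation.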

" } }, "PhysicalConnectionRequirements": { "base": "

Specifies the physical requirements for a connection.

", "refs": { "Connection$PhysicalConnectionRequirements": "

A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.

", "ConnectionInput$PhysicalConnectionRequirements": "

A map of physical connection requirements, such as VPC and SecurityGroup, needed for making this connection successfully.

" } }, "Predecessor": { "base": "

A job run that preceded this one.

", "refs": { "PredecessorList$member": null } }, "PredecessorList": { "base": null, "refs": { "JobRun$PredecessorRuns": "

A list of predecessors to this job run.

" } }, "Predicate": { "base": "

Defines the predicate of the trigger, which determines when it fires.

", "refs": { "CreateTriggerRequest$Predicate": "

A predicate to specify when the new trigger should fire.

", "Trigger$Predicate": "

The predicate of this trigger.

", "TriggerUpdate$Predicate": "

The predicate of this trigger, which defines when it will fire.

" } }, "PredicateString": { "base": null, "refs": { "GetPartitionsRequest$Expression": "

An expression filtering the partitions to be returned.
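For example, assuming a partition key named year, an expression such as year = '2017' would restrict the results to partitions with that value.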

" } }, "PrincipalType": { "base": null, "refs": { "UserDefinedFunction$OwnerType": "

The owner type.

", "UserDefinedFunctionInput$OwnerType": "

The owner type.

" } }, "PythonScript": { "base": null, "refs": { "CreateScriptResponse$PythonScript": "

The Python script generated from the DAG.

", "GetDataflowGraphRequest$PythonScript": "

The Python script to transform.

", "GetPlanResponse$PythonScript": "

A Python script to perform the mapping.

" } }, "ResetJobBookmarkRequest": { "base": null, "refs": { } }, "ResetJobBookmarkResponse": { "base": null, "refs": { } }, "ResourceNumberLimitExceededException": { "base": "

A numerical resource limit was exceeded.

", "refs": { } }, "ResourceType": { "base": null, "refs": { "ResourceUri$ResourceType": "

The type of the resource.

" } }, "ResourceUri": { "base": "

URIs for function resources.

", "refs": { "ResourceUriList$member": null } }, "ResourceUriList": { "base": null, "refs": { "UserDefinedFunction$ResourceUris": "

The resource URIs for the function.

", "UserDefinedFunctionInput$ResourceUris": "

The resource URIs for the function.

" } }, "RoleArn": { "base": null, "refs": { "Crawler$Role": "

The ARN of an IAM role used to access customer resources such as data in S3.

", "CreateCrawlerRequest$Role": "

The AWS ARN of the IAM role used by the new Crawler to access customer resources.

", "CreateDevEndpointRequest$RoleArn": "

The IAM role for the DevEndpoint.

", "CreateDevEndpointResponse$RoleArn": "

The AWS ARN of the role assigned to the new DevEndpoint.

", "DevEndpoint$RoleArn": "

The AWS ARN of the IAM role used in this DevEndpoint.

", "UpdateCrawlerRequest$Role": "

The AWS ARN of the IAM role used by the Crawler to access customer resources.

" } }, "RoleString": { "base": null, "refs": { "CreateJobRequest$Role": "

The role associated with this job.

", "Job$Role": "

The role associated with this job.

", "JobUpdate$Role": "

The role associated with this job.

" } }, "S3Target": { "base": "

Specifies a crawler target in AWS S3.

", "refs": { "S3TargetList$member": null } }, "S3TargetList": { "base": null, "refs": { "CrawlerTargets$S3Targets": "

Specifies targets in Amazon S3.

" } }, "Schedule": { "base": "

A scheduling object using a cron statement to schedule an event.
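For example, a cron expression such as cron(15 12 * * ? *) would run the scheduled event every day at 12:15 UTC.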

", "refs": { "Crawler$Schedule": "

A Schedule object that specifies the schedule on which this Crawler is to be run.

" } }, "ScheduleState": { "base": null, "refs": { "Schedule$State": "

The state of the schedule.

" } }, "SchedulerNotRunningException": { "base": "

The specified scheduler is not running.

", "refs": { } }, "SchedulerRunningException": { "base": "

The specified scheduler is already running.

", "refs": { } }, "SchedulerTransitioningException": { "base": "

The specified scheduler is transitioning.

", "refs": { } }, "SchemaChangePolicy": { "base": "

Crawler policy for update and deletion behavior.

", "refs": { "Crawler$SchemaChangePolicy": "

Sets policy for the crawler's update and delete behavior.

", "CreateCrawlerRequest$SchemaChangePolicy": "

Policy for the crawler's update and deletion behavior.

", "UpdateCrawlerRequest$SchemaChangePolicy": "

Policy for the crawler's update and deletion behavior.

" } }, "SchemaPathString": { "base": null, "refs": { "MappingEntry$SourcePath": "

The source path.

", "MappingEntry$TargetPath": "

The target path.

" } }, "ScriptLocationString": { "base": null, "refs": { "JobCommand$ScriptLocation": "

Specifies the location of a script that executes a job.

" } }, "SecurityGroupIdList": { "base": null, "refs": { "PhysicalConnectionRequirements$SecurityGroupIdList": "

The security group ID list used by the connection.

" } }, "Segment": { "base": "

Defines a non-overlapping region of a table's partitions, allowing multiple requests to be executed in parallel.

", "refs": { "GetPartitionsRequest$Segment": "

The segment of the table's partitions to scan in this request.

" } }, "SerDeInfo": { "base": "

Information about a serialization/deserialization program (SerDe) which serves as an extractor and loader.

", "refs": { "StorageDescriptor$SerdeInfo": "

Serialization/deserialization (SerDe) information.

" } }, "SkewedInfo": { "base": "

Specifies skewed values in a table. Skewed values are those that occur with very high frequency.

", "refs": { "StorageDescriptor$SkewedInfo": "

Information about values that appear very frequently in a column (skewed values).

" } }, "StartCrawlerRequest": { "base": null, "refs": { } }, "StartCrawlerResponse": { "base": null, "refs": { } }, "StartCrawlerScheduleRequest": { "base": null, "refs": { } }, "StartCrawlerScheduleResponse": { "base": null, "refs": { } }, "StartJobRunRequest": { "base": null, "refs": { } }, "StartJobRunResponse": { "base": null, "refs": { } }, "StartTriggerRequest": { "base": null, "refs": { } }, "StartTriggerResponse": { "base": null, "refs": { } }, "StopCrawlerRequest": { "base": null, "refs": { } }, "StopCrawlerResponse": { "base": null, "refs": { } }, "StopCrawlerScheduleRequest": { "base": null, "refs": { } }, "StopCrawlerScheduleResponse": { "base": null, "refs": { } }, "StopTriggerRequest": { "base": null, "refs": { } }, "StopTriggerResponse": { "base": null, "refs": { } }, "StorageDescriptor": { "base": "

Describes the physical storage of table data.

", "refs": { "Partition$StorageDescriptor": "

Provides information about the physical location where the partition is stored.

", "PartitionInput$StorageDescriptor": "

Provides information about the physical location where the partition is stored.

", "Table$StorageDescriptor": "

A storage descriptor containing information about the physical storage of this table.

", "TableInput$StorageDescriptor": "

A storage descriptor containing information about the physical storage of this table.

" } }, "StringList": { "base": null, "refs": { "ConnectionsList$Connections": "

A list of connections used by the job.

", "CreateDevEndpointRequest$SecurityGroupIds": "

Security group IDs for the security groups to be used by the new DevEndpoint.

", "CreateDevEndpointResponse$SecurityGroupIds": "

The security groups assigned to the new DevEndpoint.

", "DevEndpoint$SecurityGroupIds": "

A list of security group identifiers used in this DevEndpoint.

" } }, "Table": { "base": "

Represents a collection of related data organized in columns and rows.

", "refs": { "GetTableResponse$Table": "

The Table object that defines the specified table.

", "TableList$member": null, "TableVersion$Table": null } }, "TableError": { "base": "

An error record for table operations.

", "refs": { "TableErrors$member": null } }, "TableErrors": { "base": null, "refs": { "BatchDeleteTableResponse$Errors": "

A list of errors encountered in attempting to delete the specified tables.

" } }, "TableInput": { "base": "

Structure used to create or update the table.

", "refs": { "CreateTableRequest$TableInput": "

The TableInput object that defines the metadata table to create in the catalog.

", "UpdateTableRequest$TableInput": "

An updated TableInput object to define the metadata table in the catalog.

" } }, "TableList": { "base": null, "refs": { "GetTablesResponse$TableList": "

A list of the requested Table objects.

" } }, "TableName": { "base": null, "refs": { "MappingEntry$SourceTable": "

The name of the source table.

", "MappingEntry$TargetTable": "

The target table.

" } }, "TablePrefix": { "base": null, "refs": { "Crawler$TablePrefix": "

The table prefix used for catalog tables that are created.

", "CreateCrawlerRequest$TablePrefix": "

The table prefix used for catalog tables that are created.

", "UpdateCrawlerRequest$TablePrefix": "

The table prefix used for catalog tables that are created.

" } }, "TableTypeString": { "base": null, "refs": { "Table$TableType": "

The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.).

", "TableInput$TableType": "

The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.).

" } }, "TableVersion": { "base": null, "refs": { "GetTableVersionsList$member": null } }, "Timestamp": { "base": null, "refs": { "CatalogImportStatus$ImportTime": "

The time that the migration was started.

", "Connection$CreationTime": "

The time this connection definition was created.

", "Connection$LastUpdatedTime": "

The last time this connection definition was updated.

", "Crawler$CreationTime": "

The time when the Crawler was created.

", "Crawler$LastUpdated": "

The time the Crawler was last updated.

", "Database$CreateTime": "

The time at which the metadata database was created in the catalog.

", "GrokClassifier$CreationTime": "

The time this classifier was registered.

", "GrokClassifier$LastUpdated": "

The time this classifier was last updated.

", "LastCrawlInfo$StartTime": "

The time at which the crawl started.

", "Partition$CreationTime": "

The time at which the partition was created.

", "Partition$LastAccessTime": "

The last time at which the partition was accessed.

", "Partition$LastAnalyzedTime": "

The last time at which column statistics were computed for this partition.

", "PartitionInput$LastAccessTime": "

The last time at which the partition was accessed.

", "PartitionInput$LastAnalyzedTime": "

The last time at which column statistics were computed for this partition.

", "Table$CreateTime": "

Time when the table definition was created in the Data Catalog.

", "Table$UpdateTime": "

Last time the table was updated.

", "Table$LastAccessTime": "

Last time the table was accessed. This is usually taken from HDFS, and may not be reliable.

", "Table$LastAnalyzedTime": "

Last time column statistics were computed for this table.

", "TableInput$LastAccessTime": "

Last time the table was accessed.

", "TableInput$LastAnalyzedTime": "

Last time column statistics were computed for this table.

", "UserDefinedFunction$CreateTime": "

The time at which the function was created.

" } }, "TimestampValue": { "base": null, "refs": { "CreateDevEndpointResponse$CreatedTimestamp": "

The point in time at which this DevEndpoint was created.

", "DevEndpoint$CreatedTimestamp": "

The point in time at which this DevEndpoint was created.

", "DevEndpoint$LastModifiedTimestamp": "

The point in time at which this DevEndpoint was last modified.

", "Job$CreatedOn": "

The time and date that this job specification was created.

", "Job$LastModifiedOn": "

The last point in time when this job specification was modified.

", "JobRun$StartedOn": "

The date and time at which this job run was started.

", "JobRun$LastModifiedOn": "

The last time this job run was modified.

", "JobRun$CompletedOn": "

The date and time this job run completed.

" } }, "Token": { "base": null, "refs": { "GetClassifiersRequest$NextToken": "

An optional continuation token.

", "GetClassifiersResponse$NextToken": "

A continuation token.

", "GetConnectionsRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetConnectionsResponse$NextToken": "

A continuation token, if the list of connections returned does not include the last of the filtered connections.

", "GetCrawlerMetricsRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetCrawlerMetricsResponse$NextToken": "

A continuation token, if the returned list does not contain the last metric available.

", "GetCrawlersRequest$NextToken": "

A continuation token, if this is a continuation request.

", "GetCrawlersResponse$NextToken": "

A continuation token, if the returned list has not reached the end of those defined in this customer account.

", "GetDatabasesRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetDatabasesResponse$NextToken": "

A continuation token for paginating the returned list of databases, returned if the current segment of the list is not the last.

", "GetPartitionsRequest$NextToken": "

A continuation token, if this is not the first call to retrieve these partitions.

", "GetPartitionsResponse$NextToken": "

A continuation token, if the returned list of partitions does not include the last one.

", "GetTableVersionsRequest$NextToken": "

A continuation token, if this is not the first call.

", "GetTableVersionsResponse$NextToken": "

A continuation token, if the list of available versions does not include the last one.

", "GetTablesRequest$NextToken": "

A continuation token, included if this is a continuation call.

", "GetTablesResponse$NextToken": "

A continuation token, present if the current list segment is not the last.

", "GetUserDefinedFunctionsRequest$NextToken": "

A continuation token, if this is a continuation call.

", "GetUserDefinedFunctionsResponse$NextToken": "

A continuation token, if the list of functions returned does not include the last requested function.

" } }, "TotalSegmentsInteger": { "base": null, "refs": { "Segment$TotalSegments": "

The total number of segments.

" } }, "Trigger": { "base": "

Information about a specific trigger.

", "refs": { "GetTriggerResponse$Trigger": "

The requested trigger definition.

", "TriggerList$member": null, "UpdateTriggerResponse$Trigger": "

The resulting trigger definition.

" } }, "TriggerList": { "base": null, "refs": { "GetTriggersResponse$Triggers": "

A list of triggers for the specified job.

" } }, "TriggerState": { "base": null, "refs": { "Trigger$State": "

The current state of the trigger.

" } }, "TriggerType": { "base": null, "refs": { "CreateTriggerRequest$Type": "

The type of the new trigger.

", "Trigger$Type": "

The type of this trigger.

" } }, "TriggerUpdate": { "base": "

A structure used to provide information for updating a trigger.

", "refs": { "UpdateTriggerRequest$TriggerUpdate": "

The new values with which to update the trigger.

" } }, "URI": { "base": null, "refs": { "Database$LocationUri": "

The location of the database (for example, an HDFS path).

", "DatabaseInput$LocationUri": "

The location of the database (for example, an HDFS path).

", "ResourceUri$Uri": "

The URI for accessing the resource.

" } }, "UpdateBehavior": { "base": null, "refs": { "SchemaChangePolicy$UpdateBehavior": "

The update behavior.

" } }, "UpdateClassifierRequest": { "base": null, "refs": { } }, "UpdateClassifierResponse": { "base": null, "refs": { } }, "UpdateConnectionRequest": { "base": null, "refs": { } }, "UpdateConnectionResponse": { "base": null, "refs": { } }, "UpdateCrawlerRequest": { "base": null, "refs": { } }, "UpdateCrawlerResponse": { "base": null, "refs": { } }, "UpdateCrawlerScheduleRequest": { "base": null, "refs": { } }, "UpdateCrawlerScheduleResponse": { "base": null, "refs": { } }, "UpdateDatabaseRequest": { "base": null, "refs": { } }, "UpdateDatabaseResponse": { "base": null, "refs": { } }, "UpdateDevEndpointRequest": { "base": null, "refs": { } }, "UpdateDevEndpointResponse": { "base": null, "refs": { } }, "UpdateGrokClassifierRequest": { "base": "

Specifies a Grok classifier to update when passed to UpdateClassifier.

", "refs": { "UpdateClassifierRequest$GrokClassifier": "

A GrokClassifier object with updated fields.

" } }, "UpdateJobRequest": { "base": null, "refs": { } }, "UpdateJobResponse": { "base": null, "refs": { } }, "UpdatePartitionRequest": { "base": null, "refs": { } }, "UpdatePartitionResponse": { "base": null, "refs": { } }, "UpdateTableRequest": { "base": null, "refs": { } }, "UpdateTableResponse": { "base": null, "refs": { } }, "UpdateTriggerRequest": { "base": null, "refs": { } }, "UpdateTriggerResponse": { "base": null, "refs": { } }, "UpdateUserDefinedFunctionRequest": { "base": null, "refs": { } }, "UpdateUserDefinedFunctionResponse": { "base": null, "refs": { } }, "UriString": { "base": null, "refs": { "CreateJobRequest$LogUri": "

Location of the logs for this job.

", "Job$LogUri": "

Location of the logs for this job.

", "JobUpdate$LogUri": "

Location of the logs for this job.

" } }, "UserDefinedFunction": { "base": "

Represents the equivalent of a Hive user-defined function (UDF) definition.

", "refs": { "GetUserDefinedFunctionResponse$UserDefinedFunction": "

The requested function definition.

", "UserDefinedFunctionList$member": null } }, "UserDefinedFunctionInput": { "base": "

A structure used to create or update a user-defined function.

", "refs": { "CreateUserDefinedFunctionRequest$FunctionInput": "

A FunctionInput object that defines the function to create in the Data Catalog.

", "UpdateUserDefinedFunctionRequest$FunctionInput": "

A FunctionInput object that re-defines the function in the Data Catalog.

" } }, "UserDefinedFunctionList": { "base": null, "refs": { "GetUserDefinedFunctionsResponse$UserDefinedFunctions": "

A list of requested function definitions.

" } }, "ValidationException": { "base": "

A value could not be validated.

", "refs": { } }, "ValueString": { "base": null, "refs": { "BoundedPartitionValueList$member": null, "ConnectionProperties$value": null, "ValueStringList$member": null } }, "ValueStringList": { "base": null, "refs": { "DeletePartitionRequest$PartitionValues": "

The values that define the partition.

", "GetPartitionRequest$PartitionValues": "

The values that define the partition.

", "Partition$Values": "

The values of the partition.

", "PartitionError$PartitionValues": "

The values that define the partition.

", "PartitionInput$Values": "

The values of the partition.

", "PartitionValueList$Values": null } }, "VersionId": { "base": null, "refs": { "Crawler$Version": "

The version of the Crawler.

", "GrokClassifier$Version": "

The version of this classifier.

" } }, "VersionMismatchException": { "base": "

There was a version conflict.

", "refs": { } }, "VersionString": { "base": null, "refs": { "TableVersion$VersionId": null } }, "ViewTextString": { "base": null, "refs": { "Table$ViewOriginalText": "

If the table is a view, the original text of the view; otherwise null.

", "Table$ViewExpandedText": "

If the table is a view, the expanded text of the view; otherwise null.

", "TableInput$ViewOriginalText": "

If the table is a view, the original text of the view; otherwise null.

", "TableInput$ViewExpandedText": "

If the table is a view, the expanded text of the view; otherwise null.

" } } } }