diff --git a/.github/workflows/e2e-master.yaml b/.github/workflows/e2e-master.yaml
index 35927c2547..e318d161e5 100644
--- a/.github/workflows/e2e-master.yaml
+++ b/.github/workflows/e2e-master.yaml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.7, 3.8, 3.9]
+        python-version: ["3.8", "3.9", "3.10"]
     steps:
       - uses: actions/checkout@v4
         with:
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index dca23462f5..79059f246b 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -7,7 +7,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.7", "3.8", "3.10", "3.11"]
+        python-version: ["3.8", "3.10", "3.11", "3.12"]
         include:
           - python-version: "3.9"
             use_coverage: 'coverage'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2b1ed49521..f750e89057 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-# v32.0.0+snapshot
+# v32.0.0a1
 
 Kubernetes API Version: v1.32.0
 
diff --git a/README.md b/README.md
index 6fb1b4a03b..2b3dc63ed4 100644
--- a/README.md
+++ b/README.md
@@ -101,6 +101,7 @@ supported versions of Kubernetes clusters.
 - [client 29.y.z](https://pypi.org/project/kubernetes/29.0.0/): Kubernetes 1.28 or below (+-), Kubernetes 1.29 (✓), Kubernetes 1.30 or above (+-)
 - [client 30.y.z](https://pypi.org/project/kubernetes/30.1.0/): Kubernetes 1.29 or below (+-), Kubernetes 1.30 (✓), Kubernetes 1.31 or above (+-)
 - [client 31.y.z](https://pypi.org/project/kubernetes/31.0.0/): Kubernetes 1.30 or below (+-), Kubernetes 1.31 (✓), Kubernetes 1.32 or above (+-)
+- [client 32.y.z](https://pypi.org/project/kubernetes/32.0.0a1/): Kubernetes 1.31 or below (+-), Kubernetes 1.32 (✓), Kubernetes 1.33 or above (+-)
 
 > See [here](#homogenizing-the-kubernetes-python-client-versions) for an explanation of why there is no v13-v16 release.
 
@@ -168,6 +169,7 @@ between client-python versions.
 | 30.0 | Kubernetes main repo, 1.30 branch | ✓ |
 | 31.0 Alpha/Beta | Kubernetes main repo, 1.31 branch | ✗ |
 | 31.0 | Kubernetes main repo, 1.31 branch | ✓ |
+| 32.0 Alpha/Beta | Kubernetes main repo, 1.32 branch | ✗ |
 
 > See [here](#homogenizing-the-kubernetes-python-client-versions) for an explanation of why there is no v13-v16 release.
 
diff --git a/kubernetes/.openapi-generator/swagger.json.sha256 b/kubernetes/.openapi-generator/swagger.json.sha256
index 9d3273afe1..343524c1e5 100644
--- a/kubernetes/.openapi-generator/swagger.json.sha256
+++ b/kubernetes/.openapi-generator/swagger.json.sha256
@@ -1 +1 @@
-5f773c685cb5e7b97c1b3be4a7cff387a8077a4789c738dac715ba91b1c50eda
\ No newline at end of file
+b20f45563c8a98d4f6f0ccc8fea807c5a646510c40aee9183d87b0be22104194
\ No newline at end of file
diff --git a/kubernetes/README.md b/kubernetes/README.md
index 2886ef83a9..af06c24cf4 100644
--- a/kubernetes/README.md
+++ b/kubernetes/README.md
@@ -4,7 +4,7 @@ No description provided (generated by Openapi Generator https://github.com/opena
 This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
 
 - API version: release-1.32
-- Package version: 32.0.0+snapshot
+- Package version: 32.0.0a1
 - Build package: org.openapitools.codegen.languages.PythonClientCodegen
 
 ## Requirements.
diff --git a/kubernetes/__init__.py b/kubernetes/__init__.py
index e46fe7c902..5f1bb54d71 100644
--- a/kubernetes/__init__.py
+++ b/kubernetes/__init__.py
@@ -14,7 +14,7 @@
 
 __project__ = 'kubernetes'
 # The version is auto-updated. Please do not edit.
-__version__ = "32.0.0+snapshot" +__version__ = "32.0.0a1" from . import client from . import config diff --git a/kubernetes/client/__init__.py b/kubernetes/client/__init__.py index e46821316b..3f5250a5fa 100644 --- a/kubernetes/client/__init__.py +++ b/kubernetes/client/__init__.py @@ -14,7 +14,7 @@ from __future__ import absolute_import -__version__ = "32.0.0+snapshot" +__version__ = "32.0.0a1" # import apis into sdk package from kubernetes.client.api.well_known_api import WellKnownApi diff --git a/kubernetes/client/api_client.py b/kubernetes/client/api_client.py index 53d94ce40b..c9ed6c2136 100644 --- a/kubernetes/client/api_client.py +++ b/kubernetes/client/api_client.py @@ -78,7 +78,7 @@ def __init__(self, configuration=None, header_name=None, header_value=None, self.default_headers[header_name] = header_value self.cookie = cookie # Set default User-Agent. - self.user_agent = 'OpenAPI-Generator/32.0.0+snapshot/python' + self.user_agent = 'OpenAPI-Generator/32.0.0a1/python' self.client_side_validation = configuration.client_side_validation def __enter__(self): diff --git a/kubernetes/client/configuration.py b/kubernetes/client/configuration.py index e1c0ff2dc5..f29025affa 100644 --- a/kubernetes/client/configuration.py +++ b/kubernetes/client/configuration.py @@ -354,7 +354,7 @@ def to_debug_report(self): "OS: {env}\n"\ "Python Version: {pyversion}\n"\ "Version of the API: release-1.32\n"\ - "SDK Package Version: 32.0.0+snapshot".\ + "SDK Package Version: 32.0.0a1".\ format(env=sys.platform, pyversion=sys.version) def get_host_settings(self): diff --git a/kubernetes/client/models/v1alpha3_resource_claim_status.py b/kubernetes/client/models/v1alpha3_resource_claim_status.py index bb8a470e1d..b5083f8f90 100644 --- a/kubernetes/client/models/v1alpha3_resource_claim_status.py +++ b/kubernetes/client/models/v1alpha3_resource_claim_status.py @@ -110,7 +110,7 @@ def devices(self, devices): def reserved_for(self): """Gets the reserved_for of this V1alpha3ResourceClaimStatus. # noqa: E501 - ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 32 such reservations. This may get increased in the future, but not reduced. # noqa: E501 + ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. 
Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501 :return: The reserved_for of this V1alpha3ResourceClaimStatus. # noqa: E501 :rtype: list[V1alpha3ResourceClaimConsumerReference] @@ -121,7 +121,7 @@ def reserved_for(self): def reserved_for(self, reserved_for): """Sets the reserved_for of this V1alpha3ResourceClaimStatus. - ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 32 such reservations. This may get increased in the future, but not reduced. # noqa: E501 + ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501 :param reserved_for: The reserved_for of this V1alpha3ResourceClaimStatus. # noqa: E501 :type: list[V1alpha3ResourceClaimConsumerReference] diff --git a/kubernetes/client/models/v1beta1_resource_claim_status.py b/kubernetes/client/models/v1beta1_resource_claim_status.py index 67cf633317..38d7efce72 100644 --- a/kubernetes/client/models/v1beta1_resource_claim_status.py +++ b/kubernetes/client/models/v1beta1_resource_claim_status.py @@ -110,7 +110,7 @@ def devices(self, devices): def reserved_for(self): """Gets the reserved_for of this V1beta1ResourceClaimStatus. # noqa: E501 - ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. 
In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 32 such reservations. This may get increased in the future, but not reduced. # noqa: E501 + ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501 :return: The reserved_for of this V1beta1ResourceClaimStatus. # noqa: E501 :rtype: list[V1beta1ResourceClaimConsumerReference] @@ -121,7 +121,7 @@ def reserved_for(self): def reserved_for(self, reserved_for): """Sets the reserved_for of this V1beta1ResourceClaimStatus. - ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 32 such reservations. This may get increased in the future, but not reduced. # noqa: E501 + ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. 
The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501 :param reserved_for: The reserved_for of this V1beta1ResourceClaimStatus. # noqa: E501 :type: list[V1beta1ResourceClaimConsumerReference] diff --git a/kubernetes/docs/V1alpha3ResourceClaimStatus.md b/kubernetes/docs/V1alpha3ResourceClaimStatus.md index d2466fe164..c058e540a6 100644 --- a/kubernetes/docs/V1alpha3ResourceClaimStatus.md +++ b/kubernetes/docs/V1alpha3ResourceClaimStatus.md @@ -6,7 +6,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **allocation** | [**V1alpha3AllocationResult**](V1alpha3AllocationResult.md) | | [optional] **devices** | [**list[V1alpha3AllocatedDeviceStatus]**](V1alpha3AllocatedDeviceStatus.md) | Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers. | [optional] -**reserved_for** | [**list[V1alpha3ResourceClaimConsumerReference]**](V1alpha3ResourceClaimConsumerReference.md) | ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 32 such reservations. This may get increased in the future, but not reduced. | [optional] +**reserved_for** | [**list[V1alpha3ResourceClaimConsumerReference]**](V1alpha3ResourceClaimConsumerReference.md) | ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. 
| [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/kubernetes/docs/V1beta1ResourceClaimStatus.md b/kubernetes/docs/V1beta1ResourceClaimStatus.md index 401b53eae1..5c46654337 100644 --- a/kubernetes/docs/V1beta1ResourceClaimStatus.md +++ b/kubernetes/docs/V1beta1ResourceClaimStatus.md @@ -6,7 +6,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **allocation** | [**V1beta1AllocationResult**](V1beta1AllocationResult.md) | | [optional] **devices** | [**list[V1beta1AllocatedDeviceStatus]**](V1beta1AllocatedDeviceStatus.md) | Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers. | [optional] -**reserved_for** | [**list[V1beta1ResourceClaimConsumerReference]**](V1beta1ResourceClaimConsumerReference.md) | ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 32 such reservations. This may get increased in the future, but not reduced. | [optional] +**reserved_for** | [**list[V1beta1ResourceClaimConsumerReference]**](V1beta1ResourceClaimConsumerReference.md) | ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. 
| [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/kubernetes/swagger.json.unprocessed b/kubernetes/swagger.json.unprocessed index ad115fd0ad..ffbe70c72e 100644 --- a/kubernetes/swagger.json.unprocessed +++ b/kubernetes/swagger.json.unprocessed @@ -15123,7 +15123,7 @@ "x-kubernetes-list-type": "map" }, "reservedFor": { - "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", "items": { "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference" }, @@ -15956,7 +15956,7 @@ "x-kubernetes-list-type": "map" }, "reservedFor": { - "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. 
This may get increased in the future, but not reduced.", + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", "items": { "$ref": "#/definitions/io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference" }, diff --git a/scripts/constants.py b/scripts/constants.py index 2f9c44228b..7e9db65bd8 100644 --- a/scripts/constants.py +++ b/scripts/constants.py @@ -18,7 +18,7 @@ KUBERNETES_BRANCH = "release-1.32" # client version for packaging and releasing. -CLIENT_VERSION = "32.0.0+snapshot" +CLIENT_VERSION = "32.0.0a1" # Name of the release package PACKAGE_NAME = "kubernetes" diff --git a/scripts/swagger.json b/scripts/swagger.json index 151f99da50..1c3b32f4c1 100644 --- a/scripts/swagger.json +++ b/scripts/swagger.json @@ -15199,7 +15199,7 @@ "x-kubernetes-list-type": "map" }, "reservedFor": { - "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. 
The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", "items": { "$ref": "#/definitions/v1alpha3.ResourceClaimConsumerReference" }, @@ -16032,7 +16032,7 @@ "x-kubernetes-list-type": "map" }, "reservedFor": { - "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", "items": { "$ref": "#/definitions/v1beta1.ResourceClaimConsumerReference" }, diff --git a/setup.py b/setup.py index 44a353a20f..5d29758ec8 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ # Do not edit these constants. They will be updated automatically # by scripts/update-client.sh. -CLIENT_VERSION = "32.0.0+snapshot" +CLIENT_VERSION = "32.0.0a1" PACKAGE_NAME = "kubernetes" DEVELOPMENT_STATUS = "3 - Alpha"
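
The version bump in this patch flows through `kubernetes/__init__.py`, `kubernetes/client/__init__.py`, the `ApiClient` user agent, and `Configuration.to_debug_report()`. A minimal sketch for confirming that an environment actually picked up the pre-release — every attribute it reads is one of the values edited above, and nothing here contacts a cluster:

```python
# Minimal sketch: verify which client build is installed after moving to the
# 32.0.0a1 pre-release. The attributes read below are exactly the ones bumped
# in this patch; constructing ApiClient/Configuration opens no connection.
import kubernetes
from kubernetes import client

print(kubernetes.__version__)         # expected: "32.0.0a1"
print(client.__version__)             # kept in sync with the package version

api_client = client.ApiClient()
print(api_client.user_agent)          # "OpenAPI-Generator/32.0.0a1/python"
print(client.Configuration().to_debug_report())
```

Note that pip skips pre-releases by default, so `32.0.0a1` is only installed when requested explicitly (an exact `kubernetes==32.0.0a1` pin or pip's pre-release flag); a bare `pip install kubernetes` keeps resolving to the latest stable release.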
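The regenerated `ResourceClaimStatus` files above are documentation-only: the `reservedFor` cap in the description moves from 32 to 256 entries, while the model classes and their fields are unchanged. A small sketch, assuming the usual generated-model constructors, of what `reserved_for` looks like from the client side; the consumer values are invented for illustration:

```python
# Sketch only: build the v1beta1 DRA status objects locally to show the shape of
# reserved_for. The class and field names come from the models touched in this
# patch; the pod name and UID are made-up example values.
from kubernetes.client.models import (
    V1beta1ResourceClaimConsumerReference,
    V1beta1ResourceClaimStatus,
)

status = V1beta1ResourceClaimStatus(
    reserved_for=[
        V1beta1ResourceClaimConsumerReference(
            resource="pods",
            name="example-pod",  # hypothetical consumer
            uid="11111111-2222-3333-4444-555555555555",
        )
    ]
)

# Per the regenerated description, the API server now accepts up to 256 such
# entries per claim (previously 32); the client itself does not enforce the cap.
print(len(status.reserved_for))
```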
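The README additions extend the support matrix to Kubernetes 1.32. A rough sketch of checking that skew at runtime, assuming a reachable cluster and a local kubeconfig (neither is part of this change):

```python
# Rough sketch: compare the installed client against the server version, along
# the lines of the compatibility table extended in README.md. Assumes a
# reachable cluster and a kubeconfig that config.load_kube_config() can find.
import kubernetes
from kubernetes import client, config

config.load_kube_config()                # or config.load_incluster_config()
server = client.VersionApi().get_code()  # VersionInfo reported by the API server

print(f"client {kubernetes.__version__} / server {server.git_version}")
```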