Skip to content

Commit

Permalink
Merge branch 'master' into upgrade-kernel
Browse files Browse the repository at this point in the history
  • Loading branch information
csuzhangxc committed Sep 20, 2024
2 parents d6bf614 + ff467a6 commit 4615834
Show file tree
Hide file tree
Showing 62 changed files with 766 additions and 260 deletions.
11 changes: 11 additions & 0 deletions cmd/backup-manager/app/backup/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
Expand Down Expand Up @@ -101,6 +102,13 @@ func (bm *Manager) ProcessBackup() error {
return errorutils.NewAggregate(errs)
}

crData, err := json.Marshal(backup)
if err != nil {
klog.Errorf("failed to marshal backup %v to json, err: %v", backup, err)
} else {
klog.Infof("start to process backup: %s", string(crData))
}

// we treat snapshot backup as restarted if its status is not scheduled when backup pod just start to run
// we will clean backup data before run br command
if backup.Spec.Mode == v1alpha1.BackupModeSnapshot && (backup.Status.Phase != v1alpha1.BackupScheduled || v1alpha1.IsBackupRestart(backup)) {
Expand Down Expand Up @@ -132,6 +140,9 @@ func (bm *Manager) ProcessBackup() error {
return bm.performBackup(ctx, backup.DeepCopy(), nil)
}

klog.Infof("start to connect to tidb server (%s:%d) as the .spec.from field is specified",
backup.Spec.From.Host, backup.Spec.From.Port)

// validate and create from db
var db *sql.DB
db, err = bm.validateAndCreateFromDB(ctx, backup.DeepCopy())
Expand Down
8 changes: 8 additions & 0 deletions cmd/backup-manager/app/export/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
Expand Down Expand Up @@ -99,6 +100,13 @@ func (bm *BackupManager) ProcessBackup() error {
return errorutils.NewAggregate(errs)
}

crData, err := json.Marshal(backup)
if err != nil {
klog.Errorf("failed to marshal backup %v to json, err: %v", backup, err)
} else {
klog.Infof("start to process backup: %s", string(crData))
}

reason, err := bm.setOptions(backup)
if err != nil {
errs = append(errs, err)
Expand Down
8 changes: 8 additions & 0 deletions cmd/backup-manager/app/import/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/klog/v2"
)

Expand Down Expand Up @@ -87,6 +88,13 @@ func (rm *RestoreManager) ProcessRestore() error {
return errorutils.NewAggregate(errs)
}

crData, err := json.Marshal(restore)
if err != nil {
klog.Errorf("failed to marshal restore %v to json, err: %s", restore, err)
} else {
klog.Infof("start to process restore: %s", string(crData))
}

rm.setOptions(restore)

return rm.performRestore(ctx, restore.DeepCopy())
Expand Down
11 changes: 11 additions & 0 deletions cmd/backup-manager/app/restore/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
errorutils "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
Expand Down Expand Up @@ -96,12 +97,22 @@ func (rm *Manager) ProcessRestore() error {
return fmt.Errorf("no br config in %s", rm)
}

crData, err := json.Marshal(restore)
if err != nil {
klog.Errorf("failed to marshal restore %v to json, err: %s", restore, err)
} else {
klog.Infof("start to process restore: %s", string(crData))
}

if restore.Spec.To == nil {
return rm.performRestore(ctx, restore.DeepCopy(), nil)
}

rm.setOptions(restore)

klog.Infof("start to connect to tidb server (%s:%d) as the .spec.to field is specified",
restore.Spec.To.Host, restore.Spec.To.Port)

var db *sql.DB
var dsn string
err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) {
Expand Down
2 changes: 1 addition & 1 deletion cmd/http-service/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM pingcap/pingcap-base:v1
FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1

ARG TARGETARCH
RUN dnf install -y tzdata bind-utils && dnf clean all
Expand Down
36 changes: 36 additions & 0 deletions docs/api-references/docs.md
Original file line number Diff line number Diff line change
Expand Up @@ -3394,6 +3394,30 @@ azblob service account credentials.</p>
</tr>
<tr>
<td>
<code>storageAccount</code></br>
<em>
string
</em>
</td>
<td>
<p>StorageAccount is the storage account of the Azure Blob Storage.
If this field is set, it is used to set the backup-manager environment variable;
otherwise the storage account is retrieved from the secret.</p>
</td>
</tr>
<tr>
<td>
<code>sasToken</code></br>
<em>
string
</em>
</td>
<td>
<p>SasToken is the SAS token of the storage account.</p>
</td>
</tr>
<tr>
<td>
<code>prefix</code></br>
<em>
string
Expand Down Expand Up @@ -12321,6 +12345,18 @@ int
</tr>
<tr>
<td>
<code>initWaitTime</code></br>
<em>
int
</em>
</td>
<td>
<p>Wait time before PD gets started. This wait time allows the new DNS record to propagate,
ensuring that the PD DNS name resolves to the same IP address as the pod.</p>
</td>
</tr>
<tr>
<td>
<code>mode</code></br>
<em>
string
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/backup-azblob.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,6 @@ metadata:
spec:
cleanPolicy: Delete
# backupType: full
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb-host}
# port: ${tidb-port}
# user: ${tidb-user}
# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/backup-ebs-local.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,6 @@ spec:
cleanPolicy: Delete
backupType: full
backupMode: volume-snapshot
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb-host}
# port: ${tidb-port}
# user: ${tidb-user}
# secretName: backup-basic-tidb-secret
toolImage: localhost:5000/pingcap/br:latest
br:
cluster: basic
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/backup-ebs-minio.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,6 @@ spec:
cleanPolicy: Delete
backupType: full
backupMode: volume-snapshot
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb-host}
# port: ${tidb-port}
# user: ${tidb-user}
# secretName: backup-basic-tidb-secret
toolImage: localhost:5000/pingcap/br:latest
br:
cluster: basic
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/backup-local.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,6 @@ metadata:
spec:
cleanPolicy: Delete
# backupType: full
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb-host}
# port: ${tidb-port}
# user: ${tidb-user}
# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/backup-nfs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,6 @@ metadata:
spec:
cleanPolicy: Delete
# backupType: full
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb-host}
# port: ${tidb-port}
# user: ${tidb-user}
# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/backup-schedule-azblob.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,6 @@ spec:
# maxReservedTime: "2m"
schedule: "*/1 * * * *"
backupTemplate:
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb_host}
# port: ${tidb_port}
# user: ${tidb_user}
# secretName: backup-demo1-tidb-secret
cleanPolicy: Delete
br:
cluster: basic
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/backup-schedule-nfs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,6 @@ spec:
# maxReservedTime: "2m"
schedule: "*/1 * * * *"
backupTemplate:
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb_host}
# port: ${tidb_port}
# user: ${tidb_user}
# secretName: backup-demo1-tidb-secret
cleanPolicy: Delete
br:
cluster: basic
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/restore-azblob.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,6 @@ metadata:
namespace: default
spec:
# backupType: full
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# to:
# host: ${tidb_host}
# port: ${tidb_port}
# user: ${tidb_user}
# secretName: restore-demo2-tidb-secret
br:
cluster: basic
clusterNamespace: default
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/restore-ebs-minio.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,6 @@ metadata:
spec:
backupType: full
backupMode: volume-snapshot
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb-host}
# port: ${tidb-port}
# user: ${tidb-user}
# secretName: backup-basic-tidb-secret
toolImage: localhost:5000/pingcap/br:latest
br:
cluster: basic
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/restore-local.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,6 @@ metadata:
namespace: default
spec:
# backupType: full
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# from:
# host: ${tidb-host}
# port: ${tidb-port}
# user: ${tidb-user}
# secretName: backup-basic-tidb-secret
br:
cluster: basic
clusterNamespace: default
Expand Down
6 changes: 0 additions & 6 deletions examples/backup/restore-nfs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,6 @@ metadata:
namespace: default
spec:
# backupType: full
# Only needed for TiDB Operator < v1.1.7 or TiDB < v4.0.8
# to:
# host: ${tidb_host}
# port: ${tidb_port}
# user: ${tidb_user}
# secretName: restore-demo2-tidb-secret
br:
cluster: basic
clusterNamespace: default
Expand Down
15 changes: 8 additions & 7 deletions examples/basic/pd-micro-service-cluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,9 @@ spec:
helper:
image: alpine:3.16.0
pd:
baseImage: pingcap/pd
version: v8.1.0
# TODO: replaced v8.3.0 after v8.3.0 released
baseImage: hub.pingcap.net/devbuild/pd
version: v8.3.0-5427
maxFailoverCount: 0
replicas: 1
# if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
Expand All @@ -28,13 +29,13 @@ spec:
mode: "ms"
pdms:
- name: "tso"
baseImage: pingcap/pd
version: v8.1.0
baseImage: hub.pingcap.net/devbuild/pd
version: v8.3.0-5427
replicas: 2
- name: "scheduling"
baseImage: pingcap/pd
version: v8.1.0
replicas: 1
baseImage: hub.pingcap.net/devbuild/pd
version: v8.3.0-5427
replicas: 2
tikv:
baseImage: pingcap/tikv
version: v8.1.0
Expand Down
2 changes: 1 addition & 1 deletion images/br-federation-manager/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM pingcap/pingcap-base:v1
FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1
ARG TARGETARCH
RUN dnf install -y bind-utils tzdata && dnf clean all
ADD bin/${TARGETARCH}/br-federation-manager /usr/local/bin/br-federation-manager
2 changes: 1 addition & 1 deletion images/br-federation-manager/Dockerfile.e2e
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM pingcap/pingcap-base:v1
FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1

ARG TARGETARCH
RUN dnf install -y tzdata bind-utils && dnf clean all
Expand Down
2 changes: 1 addition & 1 deletion images/tidb-backup-manager/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM pingcap/pingcap-base:v1
FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1
ARG TARGETARCH
ARG RCLONE_VERSION=v1.57.0
ARG SHUSH_VERSION=v1.4.0
Expand Down
2 changes: 1 addition & 1 deletion images/tidb-backup-manager/Dockerfile.e2e
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM pingcap/pingcap-base:v1
FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1
ARG TARGETARCH=amd64
ARG RCLONE_VERSION=v1.57.0
ARG SHUSH_VERSION=v1.4.0
Expand Down
2 changes: 1 addition & 1 deletion images/tidb-operator/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM pingcap/pingcap-base:v1
FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1

ARG TARGETARCH
RUN dnf install -y tzdata bind-utils && dnf clean all
Expand Down
2 changes: 1 addition & 1 deletion images/tidb-operator/Dockerfile.e2e
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM pingcap/pingcap-base:v1
FROM ghcr.io/pingcap-qe/bases/pingcap-base:v1.9.1

RUN dnf install -y tzdata bash bind-utils && dnf clean all

Expand Down
6 changes: 0 additions & 6 deletions manifests/backup/backup-aws-s3-br.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,6 @@ spec:
# timeAgo: <time>
# checksum: true
# sendCredToTikv: true
from:
host: 172.30.6.56
secretName: mySecret
# port: 4000
# user: root
# tlsClientSecretName: <backup-tls-secretname>
s3:
provider: aws
region: us-west-2
Expand Down
6 changes: 0 additions & 6 deletions manifests/backup/backup-gcs-br.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,6 @@ spec:
# rateLimit: 0
# timeAgo: <time>
# checksum: true
from:
host: 172.30.6.56
secretName: my-secret
# port: 4000
# user: root
# tlsClientSecretName: <backup-tls-secretname>
gcs:
projectId: gcp
location: us-west2
Expand Down
Loading

0 comments on commit 4615834

Please sign in to comment.