Compare commits: main ... danderson/
2 Commits: c31c6745c4, 06f1192d3d
@ -13,6 +13,7 @@
// variables. All configuration is optional.
//
// - TS_AUTHKEY: the authkey to use for login.
// - TS_HOSTNAME: the hostname to request for the node.
// - TS_ROUTES: subnet routes to advertise.
// - TS_DEST_IP: proxy all incoming Tailscale traffic to the given
//   destination.
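The variables documented above are how containerboot is configured when it runs as a container image. For context, here is a hedged sketch of how a pod spec builder might set them on the proxy container, using the corev1.EnvVar type that appears later in this diff; buildProxyEnv and the sample values are hypothetical, not part of the change:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// buildProxyEnv is a hypothetical helper showing how the documented
// containerboot variables could be wired into a container spec.
func buildProxyEnv(authKey, hostname, routes, destIP string) []corev1.EnvVar {
	return []corev1.EnvVar{
		{Name: "TS_AUTHKEY", Value: authKey},
		{Name: "TS_HOSTNAME", Value: hostname},
		{Name: "TS_ROUTES", Value: routes},
		{Name: "TS_DEST_IP", Value: destIP},
	}
}

func main() {
	for _, e := range buildProxyEnv("tskey-...", "my-server", "10.0.0.0/24", "10.20.30.40") {
		fmt.Printf("%s=%s\n", e.Name, e.Value)
	}
}
```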
@ -74,6 +75,7 @@ func main() {
	cfg := &settings{
		AuthKey:         defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""),
		Hostname:        defaultEnv("TS_HOSTNAME", ""),
		Routes:          defaultEnv("TS_ROUTES", ""),
		ProxyTo:         defaultEnv("TS_DEST_IP", ""),
		DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""),
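The cfg block above relies on the defaultEnv and defaultEnvs helpers defined elsewhere in containerboot. For readers following this hunk in isolation, a rough sketch of what such helpers look like; the real implementations may differ in detail:

```go
package main

import (
	"fmt"
	"os"
)

// defaultEnv returns the value of envName, or defVal if it is unset.
func defaultEnv(envName, defVal string) string {
	if v, ok := os.LookupEnv(envName); ok {
		return v
	}
	return defVal
}

// defaultEnvs tries several variable names in order and returns the first
// one that is set, falling back to defVal.
func defaultEnvs(envNames []string, defVal string) string {
	for _, name := range envNames {
		if v, ok := os.LookupEnv(name); ok {
			return v
		}
	}
	return defVal
}

func main() {
	os.Setenv("TS_HOSTNAME", "my-server")
	fmt.Println(defaultEnv("TS_HOSTNAME", ""))                          // "my-server"
	fmt.Println(defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, "")) // "" unless one is set
}
```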
@ -394,6 +396,9 @@ func tailscaleUp(ctx context.Context, cfg *settings) error {
	if cfg.Routes != "" {
		args = append(args, "--advertise-routes="+cfg.Routes)
	}
	if cfg.Hostname != "" {
		args = append(args, "--hostname="+cfg.Hostname)
	}
	if cfg.ExtraArgs != "" {
		args = append(args, strings.Fields(cfg.ExtraArgs)...)
	}
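For a concrete sense of what the hunk above produces, a small standalone sketch that assembles the `tailscale up` argument list the same way. The settings fields mirror the struct later in this diff, and the `--accept-dns=false` base flag comes from the test expectations below; the snippet itself is illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

type settings struct {
	Hostname  string
	Routes    string
	ExtraArgs string
}

// upArgs mirrors the flag-building logic in the hunk above.
func upArgs(cfg settings) []string {
	args := []string{"--accept-dns=false"}
	if cfg.Routes != "" {
		args = append(args, "--advertise-routes="+cfg.Routes)
	}
	if cfg.Hostname != "" {
		args = append(args, "--hostname="+cfg.Hostname)
	}
	if cfg.ExtraArgs != "" {
		args = append(args, strings.Fields(cfg.ExtraArgs)...)
	}
	return args
}

func main() {
	cfg := settings{Hostname: "my-server", Routes: "10.0.0.0/24"}
	fmt.Println(strings.Join(upArgs(cfg), " "))
	// Prints: --accept-dns=false --advertise-routes=10.0.0.0/24 --hostname=my-server
}
```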
@ -522,6 +527,7 @@ func installIPTablesRule(ctx context.Context, dstStr string, tsIPs []netip.Prefi
// settings is all the configuration for containerboot.
type settings struct {
	AuthKey         string
	Hostname        string
	Routes          string
	ProxyTo         string
	DaemonExtraArgs string

@ -550,6 +550,22 @@ func TestContainerBoot(t *testing.T) {
				},
			},
		},
		{
			Name: "hostname",
			Env: map[string]string{
				"TS_HOSTNAME": "my-server",
			},
			Phases: []phase{
				{
					WantCmds: []string{
						"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
						"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --hostname=my-server",
					},
				}, {
					Notify: runningNotify,
				},
			},
		},
	}

	for _, test := range tests {

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@ -0,0 +1,24 @@
apiVersion: v2
name: chart
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"
@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "chart.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "chart.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "chart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "chart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "chart.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "chart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "chart.labels" -}}
helm.sh/chart: {{ include "chart.chart" . }}
{{ include "chart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "chart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "chart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "chart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "chart.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "chart.fullname" . }}
  namespace: {{ .Values.namespace }}
  labels:
    {{- include "chart.labels" . | nindent 4 }}
spec:
  replicas: 1 # Do not attempt to increase. It will not do what you want.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      {{- include "chart.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "chart.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "chart.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      volumes:
      - name: oauth
        secret:
          secretName: operator-oauth
      containers:
      - name: {{ .Chart.Name }}
        securityContext:
          {{- toYaml .Values.securityContext | nindent 12 }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        resources:
          {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Values.namespace }}
@ -0,0 +1,126 @@

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: proxies
  namespace: {{ .Values.namespace }}
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: proxies
  namespace: {{ .Values.namespace }}
subjects:
- kind: ServiceAccount
  name: proxies
  namespace: {{ .Values.namespace }}
roleRef:
  kind: Role
  name: proxies
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "chart.fullname" . }}-operator
rules:
- apiGroups: [""]
  resources: ["services", "services/status"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "chart.fullname" . }}-operator
subjects:
- kind: ServiceAccount
  name: operator
  namespace: {{ .Values.namespace }}
roleRef:
  kind: ClusterRole
  name: {{ include "chart.fullname" . }}-operator
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: operator
  namespace: {{ .Values.namespace }}
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["*"]
- apiGroups: ["apps"]
  resources: ["statefulsets"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "chart.fullname" . }}-operator
  namespace: {{ .Values.namespace }}
subjects:
- kind: ServiceAccount
  name: {{ include "chart.fullname" . }}-operator
  namespace: {{ .Values.namespace }}
roleRef:
  kind: Role
  name: {{ include "chart.fullname" . }}-operator
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: operator
  namespace: tailscale
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: operator
  template:
    metadata:
      labels:
        app: operator
    spec:
      serviceAccountName: operator
      volumes:
      - name: oauth
        secret:
          secretName: operator-oauth
      containers:
        - name: operator
          image: tailscale/k8s-operator:latest
          resources:
            requests:
              cpu: 500m
              memory: 100Mi
          env:
            - name: OPERATOR_HOSTNAME
              value: tailscale-operator
            - name: OPERATOR_SECRET
              value: operator
            - name: OPERATOR_LOGGING
              value: info
            - name: OPERATOR_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CLIENT_ID_FILE
              value: /oauth/client_id
            - name: CLIENT_SECRET_FILE
              value: /oauth/client_secret
            - name: PROXY_IMAGE
              value: tailscale/tailscale:latest
            - name: PROXY_TAGS
              value: tag:k8s
          volumeMounts:
          - name: oauth
            mountPath: /oauth
            readOnly: true
@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: operator-oauth
  namespace: {{ .Values.namespace }}
stringData:
  client_id: # SET CLIENT ID HERE
  client_secret: # SET CLIENT SECRET HERE
@ -0,0 +1,15 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: proxies
  namespace: {{ .Values.namespace }}
  labels:
    {{- include "chart.labels" . | nindent 4 }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: operator
  namespace: {{ .Values.namespace }}
  labels:
    {{- include "chart.labels" . | nindent 4 }}
@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "chart.fullname" . }}-test-connection"
  labels:
    {{- include "chart.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "chart.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
@ -0,0 +1,38 @@
# Default values for chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: tailscale/k8s-operator
  pullPolicy: IfNotPresent
  tag: "v1.36.0"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

namespace: "tailscale"

tailscaleCredential:
  clientID: "" # YOUR CLIENT ID MUST GO HERE
  clientSecret: "" # YOUR CLIENT SECRET MUST GO HERE

podAnnotations: {}

podSecurityContext: {}

securityContext: {}

resources:
  requests:
    cpu: 500m
    memory: 100Mi
  # limits:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@ -43,6 +43,7 @@ import (
	"tailscale.com/ipn/store/kubestore"
	"tailscale.com/tsnet"
	"tailscale.com/types/logger"
	"tailscale.com/util/dnsname"
)

func main() {
@ -235,8 +236,9 @@ const (
	FinalizerName = "tailscale.com/finalizer"

	AnnotationExpose = "tailscale.com/expose"
	AnnotationTags   = "tailscale.com/tags"
	AnnotationExpose   = "tailscale.com/expose"
	AnnotationTags     = "tailscale.com/tags"
	AnnotationHostname = "tailscale.com/hostname"
)

// ServiceReconciler is a simple ControllerManagedBy example implementation.
@ -370,6 +372,11 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
// This function adds a finalizer to svc, ensuring that we can handle orderly
// deprovisioning later.
func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) error {
	hostname, err := nameForService(svc)
	if err != nil {
		return err
	}

	if !slices.Contains(svc.Finalizers, FinalizerName) {
		// This log line is printed exactly once during initial provisioning,
		// because once the finalizer is in place this block gets skipped. So,
@ -396,7 +403,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
	if err != nil {
		return fmt.Errorf("failed to create or get API key secret: %w", err)
	}
	_, err = a.reconcileSTS(ctx, logger, svc, hsvc, secretName)
	_, err = a.reconcileSTS(ctx, logger, svc, hsvc, secretName, hostname)
	if err != nil {
		return fmt.Errorf("failed to reconcile statefulset: %w", err)
	}
@ -558,7 +565,7 @@ func (a *ServiceReconciler) newAuthKey(ctx context.Context, tags []string) (stri
//go:embed manifests/proxy.yaml
var proxyYaml []byte

func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, parentSvc, headlessSvc *corev1.Service, authKeySecret string) (*appsv1.StatefulSet, error) {
func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, parentSvc, headlessSvc *corev1.Service, authKeySecret, hostname string) (*appsv1.StatefulSet, error) {
	var ss appsv1.StatefulSet
	if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
		return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
@ -573,6 +580,10 @@ func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.Sugare
		corev1.EnvVar{
			Name:  "TS_KUBE_SECRET",
			Value: authKeySecret,
		},
		corev1.EnvVar{
			Name:  "TS_HOSTNAME",
			Value: hostname,
		})
	ss.ObjectMeta = metav1.ObjectMeta{
		Name: headlessSvc.Name,
@ -679,3 +690,13 @@ func defaultEnv(envName, defVal string) string {
	}
	return v
}

func nameForService(svc *corev1.Service) (string, error) {
	if h, ok := svc.Annotations[AnnotationHostname]; ok {
		if err := dnsname.ValidLabel(h); err != nil {
			return "", fmt.Errorf("invalid Tailscale hostname %q: %w", h, err)
		}
		return h, nil
	}
	return svc.Namespace + "-" + svc.Name, nil
}

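nameForService picks the proxy's Tailscale hostname from the tailscale.com/hostname annotation when present (after validating it as a DNS label), and otherwise falls back to `<namespace>-<name>`. A standalone sketch of that rule, with the validation step omitted; pickHostname is a hypothetical stand-in for illustration, not part of the change:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// AnnotationHostname mirrors the constant added in this change.
const AnnotationHostname = "tailscale.com/hostname"

// pickHostname reimplements the naming rule of nameForService, minus the
// dnsname.ValidLabel check, purely for illustration.
func pickHostname(svc *corev1.Service) string {
	if h, ok := svc.Annotations[AnnotationHostname]; ok {
		return h
	}
	return svc.Namespace + "-" + svc.Name
}

func main() {
	annotated := &corev1.Service{ObjectMeta: metav1.ObjectMeta{
		Name:        "web",
		Namespace:   "default",
		Annotations: map[string]string{AnnotationHostname: "reindeer-flotilla"},
	}}
	plain := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}}

	fmt.Println(pickHostname(annotated)) // reindeer-flotilla
	fmt.Println(pickHostname(plain))     // default-web
}
```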
@ -66,7 +66,7 @@ func TestLoadBalancerClass(t *testing.T) {

	expectEqual(t, fc, expectedSecret(fullName))
	expectEqual(t, fc, expectedHeadlessService(shortName))
	expectEqual(t, fc, expectedSTS(shortName, fullName))
	expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test"))

	// Normally the Tailscale proxy pod would come up here and write its info
	// into the secret. Simulate that, then verify reconcile again and verify
@ -187,7 +187,7 @@ func TestAnnotations(t *testing.T) {

	expectEqual(t, fc, expectedSecret(fullName))
	expectEqual(t, fc, expectedHeadlessService(shortName))
	expectEqual(t, fc, expectedSTS(shortName, fullName))
	expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test"))
	want := &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind: "Service",
@ -284,7 +284,7 @@ func TestAnnotationIntoLB(t *testing.T) {

	expectEqual(t, fc, expectedSecret(fullName))
	expectEqual(t, fc, expectedHeadlessService(shortName))
	expectEqual(t, fc, expectedSTS(shortName, fullName))
	expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test"))

	// Normally the Tailscale proxy pod would come up here and write its info
	// into the secret. Simulate that, since it would have normally happened at
@ -328,7 +328,7 @@ func TestAnnotationIntoLB(t *testing.T) {
	expectReconciled(t, sr, "default", "test")
	// None of the proxy machinery should have changed...
	expectEqual(t, fc, expectedHeadlessService(shortName))
	expectEqual(t, fc, expectedSTS(shortName, fullName))
	expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test"))
	// ... but the service should have a LoadBalancer status.

	want = &corev1.Service{
@ -400,7 +400,7 @@ func TestLBIntoAnnotation(t *testing.T) {

	expectEqual(t, fc, expectedSecret(fullName))
	expectEqual(t, fc, expectedHeadlessService(shortName))
	expectEqual(t, fc, expectedSTS(shortName, fullName))
	expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test"))

	// Normally the Tailscale proxy pod would come up here and write its info
	// into the secret. Simulate that, then verify reconcile again and verify
@ -457,7 +457,7 @@ func TestLBIntoAnnotation(t *testing.T) {
	expectReconciled(t, sr, "default", "test")

	expectEqual(t, fc, expectedHeadlessService(shortName))
	expectEqual(t, fc, expectedSTS(shortName, fullName))
	expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test"))

	want = &corev1.Service{
		TypeMeta: metav1.TypeMeta{
@ -481,6 +481,108 @@ func TestLBIntoAnnotation(t *testing.T) {
	expectEqual(t, fc, want)
}

func TestCustomHostname(t *testing.T) {
	fc := fake.NewFakeClient()
	ft := &fakeTSClient{}
	zl, err := zap.NewDevelopment()
	if err != nil {
		t.Fatal(err)
	}
	sr := &ServiceReconciler{
		Client:            fc,
		tsClient:          ft,
		defaultTags:       []string{"tag:k8s"},
		operatorNamespace: "operator-ns",
		proxyImage:        "tailscale/tailscale",
		logger:            zl.Sugar(),
	}

	// Create a service that we should manage, and check that the initial round
	// of objects looks right.
	mustCreate(t, fc, &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "default",
			// The apiserver is supposed to set the UID, but the fake client
			// doesn't. So, set it explicitly because other code later depends
			// on it being set.
			UID: types.UID("1234-UID"),
			Annotations: map[string]string{
				"tailscale.com/expose":   "true",
				"tailscale.com/hostname": "reindeer-flotilla",
			},
		},
		Spec: corev1.ServiceSpec{
			ClusterIP: "10.20.30.40",
			Type:      corev1.ServiceTypeClusterIP,
		},
	})

	expectReconciled(t, sr, "default", "test")

	fullName, shortName := findGenName(t, fc, "default", "test")

	expectEqual(t, fc, expectedSecret(fullName))
	expectEqual(t, fc, expectedHeadlessService(shortName))
	expectEqual(t, fc, expectedSTS(shortName, fullName, "reindeer-flotilla"))
	want := &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:       "test",
			Namespace:  "default",
			Finalizers: []string{"tailscale.com/finalizer"},
			UID:        types.UID("1234-UID"),
			Annotations: map[string]string{
				"tailscale.com/expose":   "true",
				"tailscale.com/hostname": "reindeer-flotilla",
			},
		},
		Spec: corev1.ServiceSpec{
			ClusterIP: "10.20.30.40",
			Type:      corev1.ServiceTypeClusterIP,
		},
	}
	expectEqual(t, fc, want)

	// Turn the service back into a ClusterIP service, which should make the
	// operator clean up.
	mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
		delete(s.ObjectMeta.Annotations, "tailscale.com/expose")
	})
	// synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet
	// didn't create any child resources since this is all faked, so the
	// deletion goes through immediately.
	expectReconciled(t, sr, "default", "test")
	expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
	// Second time around, the rest of cleanup happens.
	expectReconciled(t, sr, "default", "test")
	expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
	expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
	expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
	want = &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "default",
			UID:       types.UID("1234-UID"),
			Annotations: map[string]string{
				"tailscale.com/hostname": "reindeer-flotilla",
			},
		},
		Spec: corev1.ServiceSpec{
			ClusterIP: "10.20.30.40",
			Type:      corev1.ServiceTypeClusterIP,
		},
	}
	expectEqual(t, fc, want)
}

func expectedSecret(name string) *corev1.Secret {
	return &corev1.Secret{
		TypeMeta: metav1.TypeMeta{
@ -529,7 +631,7 @@ func expectedHeadlessService(name string) *corev1.Service {
	}
}

func expectedSTS(stsName, secretName string) *appsv1.StatefulSet {
func expectedSTS(stsName, secretName, hostname string) *appsv1.StatefulSet {
	return &appsv1.StatefulSet{
		TypeMeta: metav1.TypeMeta{
			Kind: "StatefulSet",
@ -578,6 +680,7 @@ func expectedSTS(stsName, secretName string) *appsv1.StatefulSet {
						{Name: "TS_AUTH_ONCE", Value: "true"},
						{Name: "TS_DEST_IP", Value: "10.20.30.40"},
						{Name: "TS_KUBE_SECRET", Value: secretName},
						{Name: "TS_HOSTNAME", Value: hostname},
					},
					SecurityContext: &corev1.SecurityContext{
						Capabilities: &corev1.Capabilities{

@ -6,6 +6,7 @@
package dnsname

import (
	"errors"
	"fmt"
	"strings"
)
@ -94,6 +95,31 @@ func (f FQDN) Contains(other FQDN) bool {
	return strings.HasSuffix(other.WithTrailingDot(), cmp)
}

// ValidLabel reports whether label is a valid DNS label.
func ValidLabel(label string) error {
	if len(label) == 0 {
		return errors.New("empty DNS label")
	}
	if len(label) > maxLabelLength {
		return fmt.Errorf("%q is too long, max length is %d bytes", label, maxLabelLength)
	}
	if !isalphanum(label[0]) {
		return fmt.Errorf("%q is not a valid DNS label: must start with a letter or number", label)
	}
	if !isalphanum(label[len(label)-1]) {
		return fmt.Errorf("%q is not a valid DNS label: must end with a letter or number", label)
	}
	if len(label) < 2 {
		return nil
	}
	for i := 1; i < len(label)-1; i++ {
		if !isdnschar(label[i]) {
			return fmt.Errorf("%q is not a valid DNS label: contains invalid character %q", label, label[i])
		}
	}
	return nil
}

// SanitizeLabel takes a string intended to be a DNS name label
// and turns it into a valid name label according to RFC 1035.
func SanitizeLabel(label string) string {
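ValidLabel checks that a label is non-empty, no longer than maxLabelLength bytes, starts and ends with an alphanumeric character, and contains only DNS-permitted characters in between. A brief usage sketch of the new helper; the sample labels are illustrative only:

```go
package main

import (
	"fmt"

	"tailscale.com/util/dnsname"
)

func main() {
	// Labels chosen to exercise the checks above: a valid label, one with a
	// leading hyphen, one with an underscore, and an empty string.
	for _, label := range []string{"reindeer-flotilla", "-bad", "under_score", ""} {
		if err := dnsname.ValidLabel(label); err != nil {
			fmt.Printf("%q rejected: %v\n", label, err)
		} else {
			fmt.Printf("%q ok\n", label)
		}
	}
}
```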