生产部署
本章介绍如何将 gRPC 服务部署到生产环境,包括 Docker 容器化、Kubernetes 编排、负载均衡、监控告警等内容。
部署架构概览
Docker 容器化
Go 服务 Dockerfile
# 构建阶段
FROM golang:1.21-alpine AS builder
WORKDIR /app
# 安装依赖
RUN apk add --no-cache git make
# 复制 go.mod 和 go.sum
COPY go.mod go.sum ./
RUN go mod download
# 复制源代码
COPY . .
# 构建
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o server ./cmd/server
# 运行阶段
FROM alpine:latest
RUN apk --no-cache add ca-certificates
# 安装 grpc_health_probe(下方 HEALTHCHECK 依赖该二进制,基础镜像中并不自带)
RUN wget -qO /bin/grpc_health_probe \
https://github.com/grpc-ecosystem/grpc-health-probe/releases/latest/download/grpc_health_probe-linux-amd64 && \
chmod +x /bin/grpc_health_probe
WORKDIR /app
# 复制二进制文件
COPY --from=builder /app/server .
# 复制配置文件
COPY --from=builder /app/config ./config
# 暴露端口
EXPOSE 50051
# 健康检查
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD grpc_health_probe -addr=:50051 || exit 1
# 运行
ENTRYPOINT ["./server"]
Python 服务 Dockerfile
FROM python:3.12-slim
WORKDIR /app
# 安装依赖
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# 复制源代码
COPY . .
# 暴露端口
EXPOSE 50051
# 健康检查
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD python -c "import grpc; from grpc_health.v1 import health_pb2, health_pb2_grpc; \
channel = grpc.insecure_channel('localhost:50051'); \
stub = health_pb2_grpc.HealthStub(channel); \
stub.Check(health_pb2.HealthCheckRequest())" || exit 1
# 运行
CMD ["python", "server/main.py"]
Java 服务 Dockerfile
# 构建阶段
FROM maven:3.9-eclipse-temurin-17 AS builder
WORKDIR /app
# 复制 pom.xml
COPY pom.xml .
RUN mvn dependency:go-offline
# 复制源代码
COPY src ./src
# 构建
RUN mvn package -DskipTests
# 运行阶段
FROM eclipse-temurin:17-jre-alpine
# 安装 grpc_health_probe(下方 HEALTHCHECK 依赖该二进制,基础镜像中并不自带)
RUN wget -qO /bin/grpc_health_probe \
https://github.com/grpc-ecosystem/grpc-health-probe/releases/latest/download/grpc_health_probe-linux-amd64 && \
chmod +x /bin/grpc_health_probe
WORKDIR /app
# 复制 jar 文件
COPY --from=builder /app/target/*.jar app.jar
# 暴露端口
EXPOSE 50051
# 健康检查
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
CMD grpc_health_probe -addr=:50051 || exit 1
# 运行
ENTRYPOINT ["java", "-jar", "app.jar"]
.NET 服务 Dockerfile
# 构建阶段
FROM mcr.microsoft.com/dotnet/sdk:8.0 AS builder
WORKDIR /app
# 复制 csproj 并还原依赖
COPY *.csproj ./
RUN dotnet restore
# 复制源代码并构建
COPY . .
RUN dotnet publish -c Release -o out
# 运行阶段
FROM mcr.microsoft.com/dotnet/aspnet:8.0
# 安装 grpc_health_probe(下方 HEALTHCHECK 依赖该二进制;ADD --chmod 需要 BuildKit)
ADD --chmod=755 https://github.com/grpc-ecosystem/grpc-health-probe/releases/latest/download/grpc_health_probe-linux-amd64 /bin/grpc_health_probe
WORKDIR /app
# 复制构建输出
COPY --from=builder /app/out .
# 暴露端口
EXPOSE 50051
# 健康检查
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD grpc_health_probe -addr=:50051 || exit 1
# 运行
ENTRYPOINT ["dotnet", "GrpcService.dll"]
Docker Compose 示例
# docker-compose.yml
version: '3.8'
services:
# gRPC 服务
grpc-server:
build:
context: .
dockerfile: Dockerfile
ports:
- "50051:50051"
environment:
- GRPC_SERVER_ADDRESS=0.0.0.0:50051
- DATABASE_URL=postgres://user:pass@postgres:5432/mydb
depends_on:
- postgres
- redis
networks:
- backend
restart: unless-stopped
deploy:
resources:
limits:
cpus: '2'
memory: 1G
reservations:
cpus: '0.5'
memory: 256M
healthcheck:
test: ["CMD", "grpc_health_probe", "-addr=:50051"]
interval: 30s
timeout: 3s
retries: 3
start_period: 10s
# Envoy 代理(用于 gRPC-Web)
envoy:
image: envoyproxy/envoy:v1.28-latest
ports:
- "8080:8080"
- "9901:9901"
volumes:
- ./envoy.yaml:/etc/envoy/envoy.yaml
depends_on:
- grpc-server
networks:
- backend
# 数据库
postgres:
image: postgres:15-alpine
environment:
- POSTGRES_USER=user
- POSTGRES_PASSWORD=pass
- POSTGRES_DB=mydb
volumes:
- postgres-data:/var/lib/postgresql/data
networks:
- backend
# 缓存
redis:
image: redis:7-alpine
volumes:
- redis-data:/data
networks:
- backend
# Prometheus 监控
prometheus:
image: prom/prometheus:latest
ports:
- "9090:9090"
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
networks:
- backend
# Grafana 可视化
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
volumes:
- grafana-data:/var/lib/grafana
networks:
- backend
volumes:
postgres-data:
redis-data:
grafana-data:
networks:
backend:
driver: bridge
Kubernetes 部署
Namespace 配置
# namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: grpc-services
labels:
name: grpc-services
ConfigMap 配置
# configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: grpc-server-config
namespace: grpc-services
data:
# 服务配置
GRPC_SERVER_ADDRESS: "0.0.0.0:50051"
GRPC_MAX_CONNECTION_AGE: "30m"
GRPC_KEEPALIVE_TIME: "10s"
GRPC_KEEPALIVE_TIMEOUT: "3s"
# 数据库配置
DATABASE_HOST: "postgres-service"
DATABASE_PORT: "5432"
DATABASE_NAME: "mydb"
# 日志级别
LOG_LEVEL: "info"
Secret 配置
# secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: grpc-server-secret
namespace: grpc-services
type: Opaque
stringData:
DATABASE_USER: "app_user"
DATABASE_PASSWORD: "secure_password"
JWT_SECRET: "your-jwt-secret-key"
TLS_CERT: |
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
TLS_KEY: |
-----BEGIN PRIVATE KEY-----
...
-----END PRIVATE KEY-----
Deployment 配置
# deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: grpc-server
namespace: grpc-services
labels:
app: grpc-server
spec:
replicas: 3
selector:
matchLabels:
app: grpc-server
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: grpc-server
version: v1
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9090"
prometheus.io/path: "/metrics"
spec:
# 优雅终止期
terminationGracePeriodSeconds: 30
# 健康检查配置
containers:
- name: grpc-server
image: my-registry/grpc-server:v1.0.0
imagePullPolicy: Always
ports:
- name: grpc
containerPort: 50051
protocol: TCP
- name: metrics
containerPort: 9090
protocol: TCP
# 环境变量
envFrom:
- configMapRef:
name: grpc-server-config
- secretRef:
name: grpc-server-secret
# 资源限制
resources:
requests:
cpu: "500m"
memory: "256Mi"
limits:
cpu: "2000m"
memory: "1Gi"
# 存活探针
livenessProbe:
exec:
command:
- /bin/grpc_health_probe
- -addr=:50051
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 3
# 就绪探针
readinessProbe:
exec:
command:
- /bin/grpc_health_probe
- -addr=:50051
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 2
failureThreshold: 2
# 启动探针
startupProbe:
exec:
command:
- /bin/grpc_health_probe
- -addr=:50051
initialDelaySeconds: 0
periodSeconds: 2
timeoutSeconds: 2
failureThreshold: 30
# 卷挂载
volumeMounts:
- name: tls-certs
mountPath: /etc/tls
readOnly: true
volumes:
- name: tls-certs
secret:
secretName: grpc-server-secret
items:
- key: TLS_CERT
path: server.crt
- key: TLS_KEY
path: server.key
Service 配置(Headless)
# service-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: grpc-server-headless
namespace: grpc-services
labels:
app: grpc-server
spec:
type: ClusterIP
clusterIP: None # Headless Service
selector:
app: grpc-server
ports:
- name: grpc
port: 50051
targetPort: 50051
protocol: TCP
Service 配置(ClusterIP)
# service.yaml
apiVersion: v1
kind: Service
metadata:
name: grpc-server
namespace: grpc-services
labels:
app: grpc-server
spec:
type: ClusterIP
selector:
app: grpc-server
ports:
- name: grpc
port: 50051
targetPort: 50051
protocol: TCP
HorizontalPodAutoscaler 配置
# hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: grpc-server-hpa
namespace: grpc-services
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: grpc-server
minReplicas: 3
maxReplicas: 20
metrics:
# CPU 使用率
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
# 内存使用率
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
# 自定义指标(请求数/秒)
- type: Pods
pods:
metric:
name: grpc_requests_per_second
target:
type: AverageValue
averageValue: "1000"
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 10
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 60
policies:
- type: Percent
value: 100
periodSeconds: 15
- type: Pods
value: 4
periodSeconds: 15
selectPolicy: Max
PodDisruptionBudget 配置
# pdb.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: grpc-server-pdb
namespace: grpc-services
spec:
minAvailable: 2
selector:
matchLabels:
app: grpc-server
Ingress/Gateway 配置
使用 Nginx Ingress
# ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grpc-ingress
namespace: grpc-services
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-buffer-size: "128k"
nginx.ingress.kubernetes.io/proxy-buffers-number: "4"
spec:
ingressClassName: nginx
tls:
- hosts:
- grpc.example.com
secretName: grpc-tls-secret
rules:
- host: grpc.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: grpc-server
port:
number: 50051
使用 Istio Gateway
# gateway.yaml
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
name: grpc-gateway
namespace: grpc-services
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 443
name: https
protocol: HTTPS
tls:
mode: SIMPLE
credentialName: grpc-tls-secret
hosts:
- "grpc.example.com"
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: grpc-virtualservice
namespace: grpc-services
spec:
hosts:
- "grpc.example.com"
gateways:
- grpc-gateway
http:
- match:
- port: 443
route:
- destination:
host: grpc-server
port:
number: 50051
负载均衡策略
客户端负载均衡(推荐)
在 Kubernetes 中使用 Headless Service + 客户端负载均衡:
# 客户端配置
apiVersion: v1
kind: ConfigMap
metadata:
name: client-config
data:
# 使用 DNS 解析获取所有 Pod 地址
GRPC_SERVER_ADDRESS: "dns:///grpc-server-headless.grpc-services.svc.cluster.local:50051"
# 启用 round_robin 负载均衡
GRPC_LB_POLICY: "round_robin"
服务端负载均衡
使用 Nginx 或 Envoy 作为代理:
Nginx 配置:
# nginx.conf
upstream grpc_backend {
least_conn; # 最少连接算法
server grpc-server-1:50051 max_fails=3 fail_timeout=30s;
server grpc-server-2:50051 max_fails=3 fail_timeout=30s;
server grpc-server-3:50051 max_fails=3 fail_timeout=30s;
}
server {
listen 50051 http2;
location / {
grpc_pass grpc://grpc_backend;
grpc_connect_timeout 10s;
grpc_send_timeout 60s;
grpc_read_timeout 60s;
}
}
Envoy 配置:
# envoy.yaml
static_resources:
listeners:
- name: grpc_listener
address:
socket_address:
address: 0.0.0.0
port_value: 50051
filter_chains:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: auto
stat_prefix: grpc
route_config:
name: local_route
virtual_hosts:
- name: local_service
domains: ["*"]
routes:
- match:
prefix: "/"
route:
cluster: grpc_service
timeout: 60s
http_filters:
- name: envoy.filters.http.router
clusters:
- name: grpc_service
connect_timeout: 10s
type: STRICT_DNS
lb_policy: ROUND_ROBIN
# cluster 级 http2_protocol_options 已被废弃(新版 Envoy 会拒绝加载),改用 typed_extension_protocol_options 显式启用 HTTP/2
typed_extension_protocol_options:
envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
"@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
explicit_http_config:
http2_protocol_options: {}
load_assignment:
cluster_name: grpc_service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: grpc-server-headless
port_value: 50051
监控和可观测性
Prometheus 配置
# prometheus.yml
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: 'grpc-server'
kubernetes_sd_configs:
- role: pod
namespaces:
names:
- grpc-services
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: "$1:$2"
target_label: __address__
关键监控指标
服务端指标:
# 请求计数
grpc_server_handled_total{method="SayHello",status="OK"} 1000
# 请求延迟
grpc_server_handling_seconds_bucket{method="SayHello",le="0.001"} 100
grpc_server_handling_seconds_bucket{method="SayHello",le="0.01"} 500
grpc_server_handling_seconds_bucket{method="SayHello",le="0.1"} 900
# 消息大小
grpc_server_msg_received_total{method="SayHello"} 1000
grpc_server_msg_sent_total{method="SayHello"} 1000
# 连接状态
grpc_server_started_total{method="SayHello"} 1000
客户端指标:
# 客户端请求计数
grpc_client_handled_total{method="SayHello",status="OK"} 500
# 客户端延迟
grpc_client_handling_seconds_bucket{method="SayHello",le="0.01"} 200
Grafana 仪表板
{
"dashboard": {
"title": "gRPC Dashboard",
"panels": [
{
"title": "请求速率",
"type": "graph",
"targets": [
{
"expr": "sum(rate(grpc_server_handled_total[1m])) by (method)"
}
]
},
{
"title": "P99 延迟",
"type": "graph",
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket[1m])) by (le, method))"
}
]
},
{
"title": "错误率",
"type": "graph",
"targets": [
{
"expr": "sum(rate(grpc_server_handled_total{status!=\"OK\"}[1m])) by (method)"
}
]
}
]
}
}
告警规则
# alerts.yaml
groups:
- name: grpc-alerts
rules:
# 高错误率
- alert: HighErrorRate
expr: |
sum(rate(grpc_server_handled_total{status!="OK"}[5m])) by (method)
/
sum(rate(grpc_server_handled_total[5m])) by (method)
> 0.01
for: 5m
labels:
severity: critical
annotations:
summary: "gRPC 错误率过高"
description: "方法 `{{ $labels.method }}` 错误率超过 1%"
# 高延迟
- alert: HighLatency
expr: |
histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket[5m])) by (le, method))
> 1
for: 5m
labels:
severity: warning
annotations:
summary: "gRPC 延迟过高"
description: "方法 `{{ $labels.method }}` P99 延迟超过 1 秒"
# 服务不可用
- alert: ServiceUnavailable
expr: up{job="grpc-server"} == 0
for: 1m
labels:
severity: critical
annotations:
summary: "gRPC 服务不可用"
description: "服务 {{ $labels.instance }} 无法访问"
日志收集
结构化日志配置
// Go 结构化日志
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func setupLogger() *zap.Logger {
config := zap.Config{
Level: zap.NewAtomicLevelAt(zapcore.InfoLevel),
Development: false,
Encoding: "json",
EncoderConfig: zapcore.EncoderConfig{
TimeKey: "timestamp",
LevelKey: "level",
NameKey: "logger",
CallerKey: "caller",
MessageKey: "message",
StacktraceKey: "stacktrace",
LineEnding: zapcore.DefaultLineEnding,
EncodeLevel: zapcore.LowercaseLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
EncodeDuration: zapcore.SecondsDurationEncoder,
},
OutputPaths: []string{"stdout"},
ErrorOutputPaths: []string{"stderr"},
}
logger, err := config.Build()
if err != nil {
// 日志初始化失败属于致命错误,直接 panic 而不是静默忽略
panic(err)
}
return logger
}
// 使用日志
logger.Info("处理请求",
zap.String("method", info.FullMethod),
zap.String("request_id", requestID),
zap.Duration("latency", duration),
)
Fluentd/Fluent Bit 配置
# fluent-bit.conf
[INPUT]
Name tail
Tag kube.*
Path /var/log/containers/*.log
Parser docker
DB /var/log/flb_kube.db
Mem_Buf_Limit 5MB
Skip_Long_Lines On
Refresh_Interval 10
[FILTER]
Name kubernetes
Match kube.*
Kube_URL https://kubernetes.default.svc:443
Kube_CA_File /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
Kube_Token_File /var/run/secrets/kubernetes.io/serviceaccount/token
Merge_Log On
K8S-Logging.Parser On
K8S-Logging.Exclude On
[OUTPUT]
Name es
Match *
Host elasticsearch
Port 9200
Logstash_Format On
Retry_Limit False
CI/CD 部署流程
GitHub Actions 示例
# .github/workflows/deploy.yml
name: Build and Deploy
on:
push:
branches: [main]
pull_request:
branches: [main]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
- name: Run tests
run: |
go test -v -race -coverprofile=coverage.out ./...
- name: Build
run: |
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o server ./cmd/server
- name: Build Docker image
run: |
docker build -t my-registry/grpc-server:${{ github.sha }} .
- name: Push to registry
run: |
echo ${{ secrets.REGISTRY_PASSWORD }} | docker login -u ${{ secrets.REGISTRY_USER }} --password-stdin
docker push my-registry/grpc-server:${{ github.sha }}
deploy:
needs: build
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Set up kubectl
uses: azure/setup-kubectl@v3
- name: Configure kubectl
run: |
mkdir -p ~/.kube
echo "${{ secrets.KUBE_CONFIG }}" | base64 -d > ~/.kube/config
- name: Deploy to Kubernetes
run: |
# 更新镜像版本
kubectl set image deployment/grpc-server \
grpc-server=my-registry/grpc-server:${{ github.sha }} \
-n grpc-services
# 等待部署完成
kubectl rollout status deployment/grpc-server -n grpc-services --timeout=300s
- name: Run smoke tests
run: |
# 执行健康检查(CI 环境没有 TTY,不能使用 -it 参数)
kubectl exec deployment/grpc-server -n grpc-services -- \
grpc_health_probe -addr=:50051
GitLab CI 示例
# .gitlab-ci.yml
stages:
- test
- build
- deploy
variables:
IMAGE_NAME: my-registry/grpc-server
IMAGE_TAG: $CI_COMMIT_SHORT_SHA
test:
stage: test
image: golang:1.21
script:
- go test -v -race -coverprofile=coverage.out ./...
# 将 Go 原生覆盖率文件转换为 Cobertura 格式,与下方 artifacts 声明的 coverage.xml 对应
- go install github.com/boumenot/gocover-cobertura@latest
- gocover-cobertura < coverage.out > coverage.xml
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
build:
stage: build
image: docker:latest
services:
- docker:dind
script:
- docker login -u $REGISTRY_USER -p $REGISTRY_PASSWORD $REGISTRY_URL
- docker build -t $IMAGE_NAME:$IMAGE_TAG .
- docker push $IMAGE_NAME:$IMAGE_TAG
only:
- main
deploy:
stage: deploy
image: bitnami/kubectl:latest
script:
- kubectl config use-context $KUBE_CONTEXT
- kubectl set image deployment/grpc-server grpc-server=$IMAGE_NAME:$IMAGE_TAG -n grpc-services
- kubectl rollout status deployment/grpc-server -n grpc-services --timeout=300s
only:
- main
environment:
name: production
url: https://grpc.example.com
生产环境最佳实践
1. 资源配置
# 推荐资源配置
resources:
requests:
cpu: "500m" # 保证最小 CPU
memory: "256Mi" # 保证最小内存
limits:
cpu: "2000m" # 限制最大 CPU
memory: "1Gi" # 限制最大内存
2. 健康检查
# 存活探针:检测死锁或严重问题
livenessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:50051"]
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 3
failureThreshold: 3
# 就绪探针:检测是否可以接收流量
readinessProbe:
exec:
command: ["/bin/grpc_health_probe", "-addr=:50051"]
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 2
failureThreshold: 2
3. 滚动更新配置
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1 # 最多多启动 1 个 Pod
maxUnavailable: 0 # 保证服务可用
# 优雅终止
terminationGracePeriodSeconds: 30
4. 安全配置
# Pod 安全上下文
securityContext:
runAsNonRoot: true
runAsUser: 1000
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# 网络策略
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: grpc-network-policy
spec:
podSelector:
matchLabels:
app: grpc-server
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
name: istio-system
ports:
- protocol: TCP
port: 50051
egress:
- to:
- namespaceSelector:
matchLabels:
name: database
ports:
- protocol: TCP
port: 5432
5. 配置管理
# 使用 ConfigMap 和 Secret 分离配置
envFrom:
- configMapRef:
name: grpc-server-config
- secretRef:
name: grpc-server-secret
小结
本章介绍了 gRPC 服务生产部署的完整流程:
- Docker 容器化:多阶段构建、健康检查
- Kubernetes 部署:Deployment、Service、HPA、Ingress
- 负载均衡:客户端负载均衡、服务端代理
- 监控告警:Prometheus 指标、Grafana 仪表板、告警规则
- 日志收集:结构化日志、Fluent Bit 配置
- CI/CD:GitHub Actions、GitLab CI 自动化部署
- 最佳实践:资源配置、健康检查、滚动更新、安全配置
生产环境部署需要综合考虑可用性、可扩展性、安全性和可观测性,确保服务稳定运行。
[!TIP] 建议使用 Service Mesh(如 Istio)来管理 gRPC 流量,它提供了自动的负载均衡、熔断、重试和可观测性功能,无需修改应用代码。