tpu支持 (TPU support)
This commit is contained in:
26
model_training_nnn/accelerate_config_tpu.yaml
Normal file
26
model_training_nnn/accelerate_config_tpu.yaml
Normal file
@@ -0,0 +1,26 @@
# Accelerate Configuration for TPU Training
# This file configures Accelerate library for 8-core TPU training
# with mixed precision (bfloat16) support

compute_environment: TPU
distributed_type: TPU
tpu_name: null  # Will use default TPU
tpu_zone: null  # Will use default zone

# Mixed precision settings (use bfloat16 for TPU)
mixed_precision: bf16

# Number of TPU cores (v3-8 or v4-8 TPUs have 8 cores)
num_processes: 8

# TPU cluster/sudo settings (keep false for single-host TPU)
tpu_use_cluster: false
tpu_use_sudo: false

# Logging settings
main_process_port: null
machine_rank: 0
num_machines: 1

# Enable automatic optimization
use_cpu: false
Reference in New Issue
Block a user