Remove CPU optimization call and add logging for TPU strategy and data pipeline performance
@@ -50,9 +50,6 @@ class BrainToTextDecoderTrainerTF:
         self.args = args
         self.logger = None
 
-        # Optimize CPU utilization for data pipeline (use the 224 CPU cores)
-        self._configure_cpu_optimization()
-
         # Initialize TPU strategy
         self.strategy = create_tpu_strategy()
         if self.strategy is None:
@@ -60,6 +57,8 @@ class BrainToTextDecoderTrainerTF:
 
         print(f"Training on {self.strategy.num_replicas_in_sync} TPU cores")
         print(f"Strategy type: {type(self.strategy).__name__}")
+        print("💡 Using tf.data.AUTOTUNE for optimal data pipeline performance")
+        print("📝 Ensure create_input_fn uses AUTOTUNE for .map() and .prefetch() operations")
 
         # Configure mixed precision for TPU v5e-8
         if args.get('use_amp', True):
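
For context, a minimal sketch of what create_tpu_strategy() typically does. The exact implementation in this repo may differ; the return-None-on-failure behavior is assumed from the `if self.strategy is None:` check above.

# Hedged sketch: connect to the TPU and build a TPUStrategy, returning None on failure
# so the caller can fall back. Everything beyond the name create_tpu_strategy() is an
# assumption, not this repo's code.
import tensorflow as tf

def create_tpu_strategy():
    try:
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        return tf.distribute.TPUStrategy(resolver)
    except (ValueError, tf.errors.NotFoundError):
        # No TPU reachable; let the trainer decide how to fall back.
        return None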
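The two new log lines point at the data-pipeline side of the change. A minimal sketch of how create_input_fn can apply tf.data.AUTOTUNE to its .map() and .prefetch() calls follows; the file format and the parse_example helper are assumptions for illustration only.

import tensorflow as tf

AUTOTUNE = tf.data.AUTOTUNE

def create_input_fn(file_pattern, batch_size, parse_example):
    # parse_example is a hypothetical per-record decode function.
    files = tf.data.Dataset.list_files(file_pattern, shuffle=True)
    ds = files.interleave(tf.data.TFRecordDataset, num_parallel_calls=AUTOTUNE)
    ds = ds.map(parse_example, num_parallel_calls=AUTOTUNE)   # parallelized decoding
    ds = ds.batch(batch_size, drop_remainder=True)            # static shapes for TPU
    ds = ds.prefetch(AUTOTUNE)                                 # overlap input with training
    return ds

With AUTOTUNE, the tf.data runtime tunes parallelism and buffer sizes itself, which is why the manual CPU-core configuration removed in the first hunk is no longer needed.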
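The use_amp branch at the end of the hunk configures mixed precision. On TPU the usual Keras policy is 'mixed_bfloat16'; a minimal sketch is shown below, and the policy choice is an assumption based on TPU conventions rather than something visible in this diff.

import tensorflow as tf

args = {'use_amp': True}  # example config dict

if args.get('use_amp', True):
    # bfloat16 compute with float32 variables, the common setting for TPUs
    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')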