import os
import sys
with open(sys.argv[0]) as f:
code = f.read() # read the code of this file ASAP, for logging
import uuid
import glob
import time
import contextlib
from dataclasses import dataclass
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import torch._inductor.config as config
from torch.nn.parallel import DistributedDataParallel as DDP
# Use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import flex_attention, create_block_mask
flex_attention = torch.compile(flex_attention, dynamic=False)
create_block_mask = torch.compile(create_block_mask, dynamic=False)
# -----------------------------------------------------------------------------
# Muon optimizer
def zeropower_via_svd(G, steps=None):
U, S, V = G.svd()
return U @ V.T
@torch.compile
def zeropower_via_newtonschulz5(G, steps=10, eps=1e-7):
"""
Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
zero even beyond the point where the iteration no longer converges all the way to one everywhere
on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
performance at all relative to UV^T, where USV^T = G is the SVD.
"""
assert len(G.shape) == 2
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.bfloat16()
X /= (X.norm() + eps) # ensure top singular value <= 1
if G.size(0) > G.size(1):
X = X.T
for _ in range(steps):
A = X @ X.T
B = b * A + c * A @ A # adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
X = a * X + B @ X
if G.size(0) > G.size(1):
X = X.T
return X
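# In matrix form, the loop above applies the quintic update X <- a*X + b*(X X^T) X + c*(X X^T)^2 X.
# A minimal sanity check, assuming a CUDA device is available (illustrative only, never called by
# this training run): the singular values of the output should land roughly in (0.5, 1.5), i.e. the
# result is an approximate orthogonalization of G rather than exactly U V^T.
def _check_newtonschulz_orthogonalization(rows=256, cols=128):
    G = torch.randn(rows, cols, device='cuda')
    X = zeropower_via_newtonschulz5(G, steps=10)
    return torch.linalg.svdvals(X.float()) # expected: values roughly between 0.5 and 1.5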
zeropower_backends = dict(svd=zeropower_via_svd, newtonschulz5=zeropower_via_newtonschulz5)
class Muon(torch.optim.Optimizer):
"""
Muon - MomentUm Orthogonalized by Newton-schulz
Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
the advantage that it can be stably run in bfloat16 on the GPU.
Some warnings:
- This optimizer assumes that all parameters passed in are 2D.
- It should not be used for the embedding layer, the final fully connected layer, or any {0,1}-D
parameters; those should all be optimized by a standard method (e.g., AdamW).
- To use it with 4D convolutional filters, it works well to just flatten their last 3 dimensions.
- We believe it is unlikely to work well for training with small batch size.
- We believe it may not work well for finetuning pretrained models, but we haven't tested this.
- We have not yet tried this optimizer for training scenarios larger than NanoGPT (124M).
Arguments:
lr: The learning rate used by the internal SGD.
momentum: The momentum used by the internal SGD.
nesterov: Whether to use Nesterov-style momentum in the internal SGD. (recommended)
backend: The chosen backend for the orthogonalization step. (recommended: 'newtonschulz5')
backend_steps: The number of iteration steps to use in the backend, if it is iterative.
"""
def __init__(self, params, lr=0.02, momentum=0.95, nesterov=True,
backend='newtonschulz5', backend_steps=5):
defaults = dict(lr=lr, momentum=momentum, nesterov=nesterov, backend=backend, backend_steps=backend_steps)
super().__init__(params, defaults)
def step(self):
for group in self.param_groups:
lr = group['lr']
momentum = group['momentum']
zeropower_backend = zeropower_backends[group['backend']]
# generate weight updates in distributed fashion
total_params = sum(p.numel() for p in group['params'])
updates_flat = torch.zeros(total_params, device='cuda', dtype=torch.bfloat16)
curr_idx = 0
for i, p in enumerate(group['params']):
# luckily this will perfectly distribute a transformer with multiple of 4 layers to 8 GPUs
if i % int(os.environ['WORLD_SIZE']) == int(os.environ['RANK']):
g = p.grad
assert g is not None
state = self.state[p]
if 'momentum_buffer' not in state:
state['momentum_buffer'] = torch.zeros_like(g)
buf = state['momentum_buffer']
buf.mul_(momentum).add_(g)
g = g.add(buf, alpha=momentum) if group['nesterov'] else buf
g = zeropower_backend(g, steps=group['backend_steps'])
g *= max(1, g.size(0)/g.size(1))**0.5
updates_flat[curr_idx:curr_idx+p.numel()] = g.flatten()
curr_idx += p.numel()
# sync updates across devices. we are not memory-constrained so can do this simple deserialization
dist.all_reduce(updates_flat, op=dist.ReduceOp.SUM)
# deserialize and apply updates
curr_idx = 0
for p in group['params']:
g = updates_flat[curr_idx:curr_idx+p.numel()].view_as(p.data).type_as(p.data)
p.data.add_(g, alpha=-lr)
curr_idx += p.numel()
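# A minimal usage sketch (illustrative only; the optimizer setup actually used by this run appears
# further below): per the warnings in the docstring, only 2D weight matrices should be handed to
# Muon, while embeddings, the output head, and {0,1}-D parameters belong in a standard optimizer
# such as AdamW.
def _split_params_for_muon(parameters):
    params = list(parameters)
    matrix_params = [p for p in params if p.ndim == 2] # candidates for Muon
    other_params = [p for p in params if p.ndim != 2] # leave these to Adam/AdamW
    return matrix_params, other_params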
# -----------------------------------------------------------------------------
# PyTorch nn.Module definitions for the GPT-2 model
def norm(x):
return F.rms_norm(x, (x.size(-1),))
class CastedLinear(nn.Linear):
def __init__(self, in_features, out_features):
super().__init__(in_features, out_features, bias=False)
def forward(self, x):
return F.linear(x, self.weight.to(x.dtype))
class Rotary(torch.nn.Module):
def __init__(self, dim, base=10000):
super().__init__()
self.register_buffer('inv_freq', (1 / base) ** (torch.arange(0, dim, 2) / dim))
self.seq_len_cached = None
self.cos_cached = None
self.sin_cached = None
def forward(self, x):
seq_len = x.shape[1]
if seq_len != self.seq_len_cached:
t = torch.arange(seq_len, device=x.device)
freqs = torch.outer(t, self.inv_freq)
self.seq_len_cached = seq_len
self.cos_cached = freqs.cos()
self.sin_cached = freqs.sin()
cos, sin = self.cos_cached[None, :, None, :], self.sin_cached[None, :, None, :]
# apply_rotary_emb(x, cos, sin)
x1, x2 = x.chunk(2, dim=3)
y1 = x1 * cos + x2 * sin
y2 = x1 * (-sin) + x2 * cos
return torch.cat((y1, y2), 3).type_as(x)
class CausalSelfAttention(nn.Module):
def __init__(self, dim, n_head):
super().__init__()
assert dim % n_head == 0
self.n_head = n_head
self.c_q = CastedLinear(dim, dim)
self.c_k = CastedLinear(dim, dim)
self.c_v = CastedLinear(dim, dim)
# value residual lambda
self.lamb = nn.Parameter(torch.tensor(0.5)) # @Grad62304977
# rotary embeddings
self.rotary = Rotary(dim // n_head) # dim // n_head = head_dim
# output projection
self.c_proj = CastedLinear(dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x, vi, block_mask):
B, T = x.size(0), x.size(1) # batch size, sequence length
assert B == 1, "Must use batch size = 1 for FlexAttention"
q = self.c_q(x).view(B, T, self.n_head, -1)
k = self.c_k(x).view(B, T, self.n_head, -1)
v = self.c_v(x).view(B, T, self.n_head, -1)
v = (1 - self.lamb) * v + self.lamb * vi.view_as(v) # @Grad62304977
q, k = norm(q), norm(k) # QK norm suggested by @Grad62304977
q, k = self.rotary(q), self.rotary(k)
y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask)
y = y.transpose(1, 2).contiguous().view_as(x) # re-assemble all head outputs side by side
y = self.c_proj(y)
return y
class MLP(nn.Module):
def __init__(self, dim):
super().__init__()
self.c_fc = CastedLinear(dim, 4 * dim)
self.c_proj = CastedLinear(4 * dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x):
x = self.c_fc(x)
x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
x = self.c_proj(x)
return x
class Block(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = CausalSelfAttention(config.n_embd, config.n_head)
self.mlp = MLP(config.n_embd)
self.lambdas = nn.Parameter(torch.tensor([1., 0.]))
def forward(self, x, vi, x0, block_mask):
x = self.lambdas[0] * x + self.lambdas[1] * x0
x = x + self.attn(norm(x), vi, block_mask)
x = x + self.mlp(norm(x))
return x
# -----------------------------------------------------------------------------
# The main GPT-2 model
@dataclass
class GPTConfig:
vocab_size : int = 50304
n_layer : int = 12
n_head : int = 6 # head dim 128 suggested by @Grad62304977
n_embd : int = 768
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
# U-net design by @brendanh0gan
self.num_encoder_layers = config.n_layer // 2 # Half of the layers for encoder
self.num_decoder_layers = config.n_layer - self.num_encoder_layers # Remaining for decoder
# Add learnable skip connection weights for decoder layers
self.skip_weights = nn.Parameter(torch.ones(self.num_decoder_layers))
self.transformer = nn.ModuleDict(dict(
wte = nn.Embedding(config.vocab_size, config.n_embd),
# token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual learning
vte = nn.Embedding(config.vocab_size, config.n_embd*12),
h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
))
self.lm_head = CastedLinear(config.n_embd, config.vocab_size)
self.lm_head.weight.data.zero_() # @Grad62304977
def forward(self, idx, target, attn_blocksize):
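        # split the flattened token stream into documents at the GPT-2 <|endoftext|> token (id 50256)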
docs = (idx == 50256).cumsum(0)
def document_causal_mask(b, h, q_idx, kv_idx):
causal_mask = q_idx >= kv_idx
document_mask = docs[q_idx] == docs[kv_idx]
window_mask = q_idx - kv_idx < attn_blocksize
return causal_mask & document_mask & window_mask
S = len(idx)
block_mask = create_block_mask(document_causal_mask, None, None, S, S, device="cuda", _compile=True)
# forward the GPT model itself
x = self.transformer.wte(idx[None]) # token embeddings of shape (b, t, n_embd)
x = norm(x) # @Grad62304977
x0 = x
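        # token value embeddings: one n_embd-wide slice per layer (n_layer = 12), used as the value residual inside each block's attention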
vi = self.transformer.vte(idx[None]).chunk(12, dim=-1)
# Store outputs for U-Net skip connections
skip_connections = []
# Encoder pass - process only the first half of the blocks
for i in range(self.num_encoder_layers):
x = self.transformer.h[i](x, vi[i], x0, block_mask)
skip_connections.append(x)
# Decoder pass - process the remaining blocks with weighted skip connections
for i in range(self.num_decoder_layers):
x = x + self.skip_weights[i] * skip_connections.pop()
x = self.transformer.h[self.num_encoder_layers + i](x, vi[self.num_encoder_layers+i], x0, block_mask)
x = norm(x)
logits = self.lm_head(x)
logits = 30 * torch.tanh(logits / 30) # @Grad62304977
logits = logits.float()
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target.view(-1))
return loss
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader
def _peek_data_shard(filename):
# only reads the header, returns header data
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
if header[0] != 20240520:
print("ERROR: magic number mismatch in the data .bin file!")
print("---> HINT: Are you passing in a correct file with --input_bin?")
print("---> HINT: Dataset encoding changed recently, re-run data prepro or refer again to README")
print("---> HINT: For example re-run: `python dev/data/tinyshakespeare.py`, then re-try")
exit(1)
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
return ntok # for now just return the number of tokens
def _load_data_shard(filename):
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
assert header[0] == 20240520, "magic number mismatch in the data .bin file"
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
# the rest of it are tokens, stored as uint16
tokens = np.frombuffer(f.read(), dtype=np.uint16)
assert len(tokens) == ntok, "number of tokens read does not match header?"
return tokens
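# Illustrative sketch of the shard format consumed above (not used by this run; the real shards are
# produced by the dataset preprocessing scripts): a 256-entry int32 header carrying the magic number
# 20240520, the version 1, and the token count, followed by the tokens themselves as uint16.
def _write_data_shard_example(filename, tokens):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520 # magic
    header[1] = 1 # version
    header[2] = len(tokens) # number of tokens
    with open(filename, "wb") as f:
        f.write(header.tobytes())
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())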
class DistributedDataLoader:
def __init__(self, filename_pattern, T, process_rank, num_processes):
self.process_rank = process_rank
self.num_processes = num_processes
self.T = T
# glob files that match the pattern
self.files = sorted(glob.glob(filename_pattern))
assert len(self.files) > 0, f"did not find any files that match the pattern {filename_pattern}"
# load and validate all data shards, count number of tokens in total
ntok_total = 0
for fname in self.files:
shard_ntok = _peek_data_shard(fname)
assert shard_ntok >= num_processes * T + 1
ntok_total += int(shard_ntok)
self.ntok_total = ntok_total
self.reset()
def reset(self):
self.current_shard = -1
self.advance()
def advance(self): # advance to next data shard
self.current_shard = (self.current_shard + 1) % len(self.files)
self.current_position = self.process_rank * self.T
self.tokens = _load_data_shard(self.files[self.current_shard])
def next_batch(self):
batch_size = self.T * self.num_processes
buf = self.tokens[self.current_position:self.current_position+self.T+1]
buf = torch.tensor(buf.astype(np.int32), dtype=torch.long)
x = buf[:-1] # inputs
y = buf[1:] # targets
# advance current position and load next shard if necessary
self.current_position += batch_size
if self.current_position + batch_size >= len(self.tokens):
self.advance()
return x.cuda(), y.cuda()
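# Note: per batch, each rank reads a T-token window (plus one trailing token for the targets)
# starting at offset process_rank * T, and all ranks then advance by T * num_processes, so the
# ranks collectively sweep the shard with non-overlapping training windows.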
# -----------------------------------------------------------------------------
# int main
@dataclass
class Hyperparameters:
# data hyperparams
input_bin : str = 'data/fineweb10B/fineweb_train_*.bin' # input .bin to train on
input_val_bin : str = 'data/fineweb10B/fineweb_val_*.bin' # input .bin to eval validation loss on
# optimization hyperparams
batch_size : int = 8 # batch size, in sequences, across all devices
sequence_length : int = 64*1024 # sequence length, in tokens
num_iterations : int = 1530 # number of iterations to run
warmup_iters : int = 0
cooldown_iters : int = 600 # number of iterations of linear warmup/cooldown for triangular or trapezoidal schedule
weight_decay : float = 0
# evaluation and logging hyperparams
val_loss_every : int = 125 # every how many steps to evaluate val loss? 0 for only at the end
val_tokens : int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
save_every : int = 0 # every how many steps to save the checkpoint? 0 for only at the end
args = Hyperparameters()
# set up DDP (distributed data parallel). torchrun sets this env variable
assert torch.cuda.is_available()
dist.init_process_group(backend='nccl')
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
print(f"using device: {device}")
master_process = (ddp_rank == 0) # this process will do logging, checkpointing etc.
# begin logging
logfile = None
if master_process:
run_id = str(uuid.uuid4())
logdir = 'logs/%s/' % run_id
os.makedirs(logdir, exist_ok=True)
logfile = 'logs/%s.txt' % run_id
# create the log file
with open(logfile, "w") as f:
# begin the log by printing this file (the Python code)
f.write(code)
f.write('='*100 + '\n')
def print0(s, logonly=False):
if master_process:
with open(logfile, "a") as f:
if not logonly:
print(s)
f.write(s+'\n')
# log information about the hardware/software environment this is running on
# and print the full `nvidia-smi` to file
print0(f"Running pytorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}\nnvidia-smi:")
import subprocess
result = subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
print0(f'{result.stdout}', logonly=True)
print0('='*100, logonly=True)
# convenience variables
T = args.sequence_length
# calculate the number of steps to take in the val loop.
assert args.val_tokens % (T * ddp_world_size) == 0
val_steps = args.val_tokens // (T * ddp_world_size)
# calculate the steps of gradient accumulation required to attain the desired global batch size.
assert args.batch_size % (ddp_world_size) == 0
train_accumulation_steps = args.batch_size // ddp_world_size
# load tokens
train_loader = DistributedDataLoader(args.input_bin, T, ddp_rank, ddp_world_size)
val_loader = DistributedDataLoader(args.input_val_bin, T, ddp_rank, ddp_world_size)
print0(f"Training DataLoader: total number of tokens: {train_loader.ntok_total} across {len(train_loader.files)} files")
print0(f"Validation DataLoader: total number of tokens: {val_loader.ntok_total} across {len(val_loader.files)} files")
print0('='*100, logonly=True)
x, y = train_loader.next_batch()
# there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. suggested to me by @Grad62304977.
# this originates from Karpathy's experiments.
num_vocab = 50304
model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=12, n_head=6, n_embd=768))
model = model.cuda().bfloat16()
for m in model.modules():
if isinstance(m, CastedLinear):
m.float()
if hasattr(config, "coordinate_descent_tuning"):
config.coordinate_descent_tuning = True # suggested by @Chillee
model = torch.compile(model)
# here we wrap model into DDP container
model = DDP(model, device_ids=[ddp_local_rank])
raw_model = model.module # always contains the "raw" unwrapped model
# init the optimizer(s)
optimizer1 = torch.optim.Adam([raw_model.transformer.wte.weight, raw_model.transformer.vte.weight], lr=0.6, betas=(0.8, 0.95), fused=True)
optimizer2 = torch.optim.Adam([raw_model.lm_head.weight], lr=0.008, betas=(0.8, 0.95), fused=True)
params = list(raw_model.transformer.h.parameters())
matrix_params = [p for p in params if p.ndim == 2]
scalar_params = [p for p in params if p.ndim < 2] + [raw_model.skip_weights]
optimizer3 = Muon(matrix_params, lr=0.05, momentum=0.95)
optimizer4 = torch.optim.Adam(scalar_params, lr=0.04, betas=(0.8, 0.95), fused=True) # note that this learning rate is neither sensitive nor tuned
optimizers = [optimizer1, optimizer2, optimizer3, optimizer4]
# learning rate decay scheduler (linear warmup and cooldown)
def get_lr(it):
assert it <= args.num_iterations
# 1) linear warmup for warmup_iters steps
if it < args.warmup_iters:
return (it+1) / args.warmup_iters
# 2) constant lr for a while
elif it < args.num_iterations - args.cooldown_iters:
return 1.0
# 3) linear cooldown
else:
decay_ratio = (args.num_iterations - it) / args.cooldown_iters
return decay_ratio
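# With the hyperparameters above (num_iterations=1530, warmup_iters=0, cooldown_iters=600) this is a
# trapezoidal schedule: the multiplier is 1.0 for steps 0..929, then decays linearly to zero, e.g.
# get_lr(1230) = (1530 - 1230) / 600 = 0.5 and get_lr(1530) = 0.0.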
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, get_lr) for opt in optimizers]
# Start training loop
training_time_ms = 0
# start the clock
torch.cuda.synchronize()
t0 = time.time()
# begin training
for step in range(args.num_iterations + 1):
last_step = (step == args.num_iterations)
# This effectively ignores timing first 10 steps, which are slower for weird reasons.
# Alternately, and slightly more correctly in terms of benchmarking, we could do 10
# steps with dummy data first, and then re-initialize the model and reset the loader.
if step == 10:
training_time_ms = 0
t0 = time.time()
timed_steps = float('nan') if step <= 11 else (step - 10) + 1 # <= 11 to avoid bug in val
# Set the attention blocksize for the current step, in chunks of 64. By @fernbear.bsky.social
attn_blocksize = torch.tensor(64*((step/args.num_iterations * (1792 - 64) + 64)//64), dtype=torch.int, device='cuda')
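    # e.g. with 1530 total iterations this gives a 64-token window at step 0, growing roughly
    # linearly (in increments of 64) up to 1792 tokens by the final step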
# once in a while evaluate the validation dataset
if (last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# run validation batches
model.eval()
val_loader.reset()
val_loss = 0.0
for _ in range(val_steps):
with torch.no_grad():
x_val, y_val = val_loader.next_batch()
val_loss += model(x_val, y_val, attn_blocksize=attn_blocksize)
dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
val_loss /= val_steps
# log val loss to console and to logfile
print0(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms')
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
if master_process and (last_step or (args.save_every > 0 and step % args.save_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# save the state of the training process
log = dict(step=step, code=code, model=raw_model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
torch.save(log, 'logs/%s/state_step%06d.pt' % (run_id, step))
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
# bit confusing: we want to make sure to eval on 0th iteration
# but also after the very last iteration. so we loop for step <= num_iterations
# instead of just < num_iterations (one extra due to <=), only to do
# the validation/sampling one last time, and then we break right here as we're done.
if last_step:
break
# --------------- TRAINING SECTION BEGIN -----------------
model.train()
for i in range(1, train_accumulation_steps+1):
ctx = model.no_sync() if i < train_accumulation_steps else contextlib.nullcontext()
with ctx: # there's no need to sync gradients every accumulation step
# forward pass
loss = model(x, y, attn_blocksize=attn_blocksize)
# advance the dataset for the next batch
x, y = train_loader.next_batch()
# backward pass
loss.backward()
train_loss = loss.detach()
for p in model.parameters():
p.grad /= train_accumulation_steps
# momentum warmup for Muon
frac = min(step/300, 1)
optimizer3.param_groups[0]['momentum'] = (1 - frac) * 0.85 + frac * 0.95
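    # i.e. Muon's momentum ramps linearly from 0.85 at step 0 to 0.95 at step 300 and stays there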
# step the optimizers and schedulers
for opt, sched in zip(optimizers, schedulers):
opt.step()
sched.step()
# null the gradients
model.zero_grad(set_to_none=True)
# --------------- TRAINING SECTION END -------------------
# everything that follows now is just diagnostics, prints, logging, etc.
#dist.all_reduce(train_loss, op=dist.ReduceOp.AVG) # all-reducing the training loss would be more correct in terms of logging, but slower
approx_time = training_time_ms + 1000 * (time.time() - t0)
print0(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms")
if master_process:
print(f"peak memory consumption: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB")
# -------------------------------------------------------------------------
# clean up nice
dist.destroy_process_group()
====================================================================================================
Running pytorch 2.6.0.dev20241203+cu124 compiled for CUDA 12.4
nvidia-smi:
Thu Dec 5 00:51:23 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA H100 80GB HBM3 On | 00000000:19:00.0 Off | 0 |
| N/A 32C P0 74W / 700W | 3MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA H100 80GB HBM3 On | 00000000:3B:00.0 Off | 0 |
| N/A 27C P0 100W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA H100 80GB HBM3 On | 00000000:4C:00.0 Off | 0 |
| N/A 28C P0 116W / 700W | 529MiB / 81559MiB | 1% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 3 NVIDIA H100 80GB HBM3 On | 00000000:5D:00.0 Off | 0 |
| N/A 31C P0 105W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 4 NVIDIA H100 80GB HBM3 On | 00000000:9B:00.0 Off | 0 |
| N/A 32C P0 117W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 5 NVIDIA H100 80GB HBM3 On | 00000000:BB:00.0 Off | 0 |
| N/A 26C P0 108W / 700W | 529MiB / 81559MiB | 2% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 6 NVIDIA H100 80GB HBM3 On | 00000000:CB:00.0 Off | 0 |
| N/A 32C P0 121W / 700W | 115MiB / 81559MiB | 1% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 7 NVIDIA H100 80GB HBM3 On | 00000000:DB:00.0 Off | 0 |
| N/A 27C P0 116W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
+---------------------------------------------------------------------------------------+
====================================================================================================
Training DataLoader: total number of tokens: 1100000000 across 11 files
Validation DataLoader: total number of tokens: 100000000 across 1 files
====================================================================================================
step:0/1530 val_loss:10.8258 train_time:0ms step_avg:nanms
step:1/1530 train_loss:10.8258 train_time:32296ms step_avg:nanms
step:2/1530 train_loss:10.0731 train_time:32407ms step_avg:nanms
step:3/1530 train_loss:8.3621 train_time:32568ms step_avg:nanms
step:4/1530 train_loss:7.5801 train_time:32728ms step_avg:nanms
step:5/1530 train_loss:7.5010 train_time:32887ms step_avg:nanms
step:6/1530 train_loss:6.9784 train_time:33047ms step_avg:nanms
step:7/1530 train_loss:7.2527 train_time:33207ms step_avg:nanms
step:8/1530 train_loss:6.7521 train_time:33367ms step_avg:nanms
step:9/1530 train_loss:6.6315 train_time:33527ms step_avg:nanms
step:10/1530 train_loss:6.5198 train_time:33687ms step_avg:nanms
step:11/1530 train_loss:6.4634 train_time:114ms step_avg:nanms
step:12/1530 train_loss:6.3560 train_time:273ms step_avg:nanms
step:13/1530 train_loss:6.2161 train_time:434ms step_avg:144.65ms
step:14/1530 train_loss:6.1988 train_time:593ms step_avg:148.28ms
step:15/1530 train_loss:6.1283 train_time:753ms step_avg:150.66ms
step:16/1530 train_loss:6.1145 train_time:913ms step_avg:152.15ms
step:17/1530 train_loss:6.1825 train_time:1073ms step_avg:153.30ms
step:18/1530 train_loss:5.9777 train_time:1234ms step_avg:154.20ms
step:19/1530 train_loss:5.9787 train_time:1393ms step_avg:154.73ms
step:20/1530 train_loss:5.6834 train_time:1553ms step_avg:155.27ms
step:21/1530 train_loss:5.9522 train_time:1713ms step_avg:155.75ms
step:22/1530 train_loss:6.1648 train_time:1873ms step_avg:156.09ms
step:23/1530 train_loss:5.8383 train_time:2034ms step_avg:156.46ms
step:24/1530 train_loss:6.0122 train_time:2193ms step_avg:156.67ms
step:25/1530 train_loss:5.6690 train_time:2353ms step_avg:156.89ms
step:26/1530 train_loss:5.5787 train_time:2513ms step_avg:157.05ms
step:27/1530 train_loss:5.7481 train_time:2673ms step_avg:157.24ms
step:28/1530 train_loss:5.4134 train_time:2833ms step_avg:157.39ms
step:29/1530 train_loss:5.6576 train_time:2993ms step_avg:157.54ms
step:30/1530 train_loss:5.4556 train_time:3153ms step_avg:157.63ms
step:31/1530 train_loss:5.4171 train_time:3313ms step_avg:157.74ms
step:32/1530 train_loss:5.2816 train_time:3473ms step_avg:157.87ms
step:33/1530 train_loss:5.5756 train_time:3633ms step_avg:157.96ms
step:34/1530 train_loss:5.4900 train_time:3794ms step_avg:158.07ms
step:35/1530 train_loss:5.6134 train_time:3954ms step_avg:158.14ms
step:36/1530 train_loss:5.5489 train_time:4113ms step_avg:158.21ms
step:37/1530 train_loss:5.4465 train_time:4273ms step_avg:158.27ms
step:38/1530 train_loss:5.3060 train_time:4433ms step_avg:158.33ms
step:39/1530 train_loss:5.3263 train_time:4593ms step_avg:158.39ms
step:40/1530 train_loss:5.2508 train_time:4753ms step_avg:158.44ms
step:41/1530 train_loss:5.2227 train_time:4914ms step_avg:158.50ms
step:42/1530 train_loss:5.1580 train_time:5074ms step_avg:158.55ms
step:43/1530 train_loss:5.2548 train_time:5234ms step_avg:158.60ms
step:44/1530 train_loss:5.2369 train_time:5393ms step_avg:158.62ms
step:45/1530 train_loss:5.3814 train_time:5553ms step_avg:158.65ms
step:46/1530 train_loss:5.1693 train_time:5713ms step_avg:158.70ms
step:47/1530 train_loss:5.0594 train_time:5873ms step_avg:158.72ms
step:48/1530 train_loss:5.2329 train_time:6033ms step_avg:158.76ms
step:49/1530 train_loss:5.1355 train_time:6193ms step_avg:158.78ms
step:50/1530 train_loss:5.2375 train_time:6353ms step_avg:158.82ms
step:51/1530 train_loss:5.1395 train_time:6514ms step_avg:158.87ms
step:52/1530 train_loss:5.0339 train_time:6673ms step_avg:158.88ms
step:53/1530 train_loss:5.1743 train_time:6833ms step_avg:158.91ms
step:54/1530 train_loss:5.0279 train_time:6992ms step_avg:158.92ms
step:55/1530 train_loss:5.4164 train_time:7153ms step_avg:158.96ms
step:56/1530 train_loss:5.0259 train_time:7313ms step_avg:158.98ms
step:57/1530 train_loss:4.8817 train_time:7473ms step_avg:159.00ms
step:58/1530 train_loss:5.0504 train_time:7633ms step_avg:159.02ms
step:59/1530 train_loss:5.0139 train_time:7793ms step_avg:159.05ms
step:60/1530 train_loss:5.1341 train_time:7953ms step_avg:159.06ms
step:61/1530 train_loss:4.8665 train_time:8113ms step_avg:159.07ms
step:62/1530 train_loss:4.9991 train_time:8273ms step_avg:159.09ms
step:63/1530 train_loss:4.9855 train_time:8433ms step_avg:159.12ms
step:64/1530 train_loss:4.9541 train_time:8593ms step_avg:159.12ms
step:65/1530 train_loss:4.8307 train_time:8753ms step_avg:159.15ms
step:66/1530 train_loss:4.9227 train_time:8913ms step_avg:159.17ms
step:67/1530 train_loss:4.8255 train_time:9073ms step_avg:159.17ms
step:68/1530 train_loss:5.1113 train_time:9233ms step_avg:159.19ms
step:69/1530 train_loss:4.7412 train_time:9393ms step_avg:159.21ms
step:70/1530 train_loss:4.8557 train_time:9554ms step_avg:159.23ms
step:71/1530 train_loss:4.9956 train_time:9713ms step_avg:159.24ms
step:72/1530 train_loss:4.9088 train_time:9873ms step_avg:159.25ms
step:73/1530 train_loss:4.7870 train_time:10033ms step_avg:159.26ms
step:74/1530 train_loss:4.9157 train_time:10193ms step_avg:159.27ms
step:75/1530 train_loss:4.8808 train_time:10353ms step_avg:159.28ms
step:76/1530 train_loss:4.8039 train_time:10513ms step_avg:159.29ms
step:77/1530 train_loss:4.9307 train_time:10673ms step_avg:159.30ms
step:78/1530 train_loss:5.1208 train_time:10833ms step_avg:159.31ms
step:79/1530 train_loss:4.8562 train_time:10993ms step_avg:159.31ms
step:80/1530 train_loss:4.8781 train_time:11153ms step_avg:159.32ms
step:81/1530 train_loss:4.6727 train_time:11313ms step_avg:159.34ms
step:82/1530 train_loss:4.8449 train_time:11472ms step_avg:159.34ms
step:83/1530 train_loss:4.8153 train_time:11634ms step_avg:159.36ms
step:84/1530 train_loss:4.8069 train_time:11792ms step_avg:159.36ms
step:85/1530 train_loss:4.6451 train_time:11953ms step_avg:159.37ms
step:86/1530 train_loss:4.8504 train_time:12113ms step_avg:159.39ms
step:87/1530 train_loss:4.7697 train_time:12273ms step_avg:159.39ms
step:88/1530 train_loss:4.7739 train_time:12433ms step_avg:159.40ms
step:89/1530 train_loss:4.7237 train_time:12593ms step_avg:159.41ms
step:90/1530 train_loss:4.6703 train_time:12753ms step_avg:159.41ms
step:91/1530 train_loss:4.6665 train_time:12914ms step_avg:159.43ms
step:92/1530 train_loss:4.8327 train_time:13073ms step_avg:159.43ms
step:93/1530 train_loss:4.6463 train_time:13234ms step_avg:159.44ms
step:94/1530 train_loss:4.6534 train_time:13393ms step_avg:159.44ms
step:95/1530 train_loss:4.7191 train_time:13553ms step_avg:159.45ms
step:96/1530 train_loss:4.6037 train_time:13713ms step_avg:159.45ms
step:97/1530 train_loss:4.6643 train_time:13873ms step_avg:159.46ms
step:98/1530 train_loss:4.6058 train_time:14034ms step_avg:159.47ms
step:99/1530 train_loss:4.6968 train_time:14193ms step_avg:159.48ms
step:100/1530 train_loss:4.6959 train_time:14353ms step_avg:159.48ms
step:101/1530 train_loss:4.5908 train_time:14513ms step_avg:159.49ms
step:102/1530 train_loss:4.7451 train_time:14674ms step_avg:159.49ms
step:103/1530 train_loss:4.6093 train_time:14834ms step_avg:159.51ms
step:104/1530 train_loss:4.5542 train_time:14994ms step_avg:159.51ms
step:105/1530 train_loss:4.5726 train_time:15154ms step_avg:159.51ms
step:106/1530 train_loss:4.6628 train_time:15314ms step_avg:159.52ms
step:107/1530 train_loss:4.5440 train_time:15474ms step_avg:159.53ms
step:108/1530 train_loss:4.3774 train_time:15634ms step_avg:159.53ms
step:109/1530 train_loss:4.5066 train_time:15794ms step_avg:159.53ms
step:110/1530 train_loss:4.5191 train_time:15954ms step_avg:159.54ms
step:111/1530 train_loss:4.4613 train_time:16115ms step_avg:159.55ms
step:112/1530 train_loss:4.6133 train_time:16275ms step_avg:159.55ms
step:113/1530 train_loss:4.5172 train_time:16435ms step_avg:159.56ms
step:114/1530 train_loss:4.4018 train_time:16594ms step_avg:159.55ms
step:115/1530 train_loss:4.5391 train_time:16756ms step_avg:159.58ms
step:116/1530 train_loss:4.4908 train_time:16921ms step_avg:159.63ms
step:117/1530 train_loss:4.3941 train_time:17085ms step_avg:159.67ms
step:118/1530 train_loss:4.6177 train_time:17249ms step_avg:159.72ms
step:119/1530 train_loss:4.4822 train_time:17413ms step_avg:159.76ms
step:120/1530 train_loss:4.3621 train_time:17577ms step_avg:159.79ms
step:121/1530 train_loss:4.3284 train_time:17740ms step_avg:159.82ms
step:122/1530 train_loss:4.4732 train_time:17905ms step_avg:159.87ms
step:123/1530 train_loss:4.2951 train_time:18069ms step_avg:159.90ms
step:124/1530 train_loss:4.6101 train_time:18232ms step_avg:159.93ms
step:125/1530 train_loss:4.4822 train_time:18395ms step_avg:159.95ms
step:125/1530 val_loss:4.4357 train_time:18442ms step_avg:160.37ms
step:126/1530 train_loss:4.4461 train_time:18562ms step_avg:160.02ms
step:127/1530 train_loss:4.4481 train_time:18727ms step_avg:160.06ms
step:128/1530 train_loss:4.3915 train_time:18890ms step_avg:160.09ms
step:129/1530 train_loss:4.7043 train_time:19053ms step_avg:160.11ms
step:130/1530 train_loss:4.3884 train_time:19219ms step_avg:160.16ms
step:131/1530 train_loss:4.4135 train_time:19383ms step_avg:160.19ms
step:132/1530 train_loss:4.3814 train_time:19547ms step_avg:160.22ms
step:133/1530 train_loss:4.4623 train_time:19711ms step_avg:160.25ms
step:134/1530 train_loss:4.2705 train_time:19875ms step_avg:160.28ms
step:135/1530 train_loss:4.4582 train_time:20038ms step_avg:160.31ms
step:136/1530 train_loss:4.2311 train_time:20203ms step_avg:160.34ms
step:137/1530 train_loss:4.3823 train_time:20367ms step_avg:160.37ms
step:138/1530 train_loss:4.3052 train_time:20530ms step_avg:160.39ms
step:139/1530 train_loss:4.4034 train_time:20693ms step_avg:160.41ms
step:140/1530 train_loss:4.4915 train_time:20858ms step_avg:160.44ms
step:141/1530 train_loss:4.3257 train_time:21021ms step_avg:160.47ms
step:142/1530 train_loss:4.3195 train_time:21186ms step_avg:160.50ms
step:143/1530 train_loss:4.2692 train_time:21349ms step_avg:160.52ms
step:144/1530 train_loss:4.3648 train_time:21513ms step_avg:160.54ms
step:145/1530 train_loss:4.3169 train_time:21677ms step_avg:160.57ms
step:146/1530 train_loss:4.1824 train_time:21843ms step_avg:160.61ms
step:147/1530 train_loss:4.3364 train_time:22008ms step_avg:160.64ms
step:148/1530 train_loss:4.3716 train_time:22171ms step_avg:160.66ms
step:149/1530 train_loss:4.3133 train_time:22335ms step_avg:160.69ms
step:150/1530 train_loss:4.4540 train_time:22501ms step_avg:160.72ms
step:151/1530 train_loss:4.2829 train_time:22664ms step_avg:160.74ms
step:152/1530 train_loss:4.2832 train_time:22828ms step_avg:160.76ms
step:153/1530 train_loss:4.3736 train_time:22993ms step_avg:160.79ms
step:154/1530 train_loss:4.3797 train_time:23157ms step_avg:160.82ms
step:155/1530 train_loss:4.2817 train_time:23323ms step_avg:160.85ms
step:156/1530 train_loss:4.3516 train_time:23486ms step_avg:160.86ms
step:157/1530 train_loss:4.3996 train_time:23649ms step_avg:160.88ms
step:158/1530 train_loss:4.2454 train_time:23813ms step_avg:160.90ms
step:159/1530 train_loss:4.3142 train_time:23977ms step_avg:160.92ms
step:160/1530 train_loss:4.1481 train_time:24141ms step_avg:160.94ms
step:161/1530 train_loss:4.3605 train_time:24304ms step_avg:160.95ms
step:162/1530 train_loss:4.3684 train_time:24467ms step_avg:160.97ms
step:163/1530 train_loss:4.3448 train_time:24630ms step_avg:160.98ms
step:164/1530 train_loss:4.1983 train_time:24794ms step_avg:161.00ms
step:165/1530 train_loss:4.2938 train_time:24958ms step_avg:161.02ms
step:166/1530 train_loss:4.3437 train_time:25122ms step_avg:161.04ms
step:167/1530 train_loss:4.2175 train_time:25286ms step_avg:161.06ms
step:168/1530 train_loss:4.3041 train_time:25449ms step_avg:161.07ms
step:169/1530 train_loss:4.1739 train_time:25613ms step_avg:161.09ms
step:170/1530 train_loss:4.0398 train_time:25779ms step_avg:161.12ms
step:171/1530 train_loss:4.2136 train_time:25942ms step_avg:161.13ms
step:172/1530 train_loss:4.2121 train_time:26105ms step_avg:161.14ms
step:173/1530 train_loss:4.2723 train_time:26267ms step_avg:161.15ms
step:174/1530 train_loss:4.4297 train_time:26429ms step_avg:161.15ms
step:175/1530 train_loss:4.2439 train_time:26592ms step_avg:161.16ms
step:176/1530 train_loss:4.0983 train_time:26754ms step_avg:161.17ms
step:177/1530 train_loss:4.0756 train_time:26919ms step_avg:161.19ms
step:178/1530 train_loss:4.1911 train_time:27082ms step_avg:161.20ms
step:179/1530 train_loss:4.1323 train_time:27245ms step_avg:161.21ms
step:180/1530 train_loss:4.1324 train_time:27407ms step_avg:161.22ms
step:181/1530 train_loss:4.3007 train_time:27569ms step_avg:161.23ms
step:182/1530 train_loss:4.1555 train_time:27732ms step_avg:161.23ms
step:183/1530 train_loss:4.1483 train_time:27896ms step_avg:161.25ms
step:184/1530 train_loss:4.1305 train_time:28059ms step_avg:161.26ms
step:185/1530 train_loss:4.2124 train_time:28223ms step_avg:161.27ms
step:186/1530 train_loss:4.1706 train_time:28385ms step_avg:161.28ms
step:187/1530 train_loss:4.2399 train_time:28548ms step_avg:161.29ms
step:188/1530 train_loss:4.1759 train_time:28851ms step_avg:162.08ms
step:189/1530 train_loss:4.1170 train_time:29180ms step_avg:163.02ms
step:190/1530 train_loss:4.2149 train_time:29344ms step_avg:163.02ms
step:191/1530 train_loss:4.0817 train_time:29506ms step_avg:163.02ms
step:192/1530 train_loss:4.0368 train_time:29668ms step_avg:163.01ms
step:193/1530 train_loss:4.2503 train_time:29830ms step_avg:163.01ms
step:194/1530 train_loss:4.1733 train_time:29993ms step_avg:163.01ms
step:195/1530 train_loss:4.3529 train_time:30156ms step_avg:163.01ms
step:196/1530 train_loss:4.1869 train_time:30320ms step_avg:163.01ms
step:197/1530 train_loss:4.0456 train_time:30483ms step_avg:163.01ms
step:198/1530 train_loss:4.1813 train_time:30646ms step_avg:163.01ms
step:199/1530 train_loss:4.0342 train_time:30809ms step_avg:163.01ms
step:200/1530 train_loss:4.1146 train_time:30972ms step_avg:163.01ms
step:201/1530 train_loss:4.0330 train_time:31134ms step_avg:163.01ms
step:202/1530 train_loss:4.2757 train_time:31299ms step_avg:163.02ms
step:203/1530 train_loss:4.0638 train_time:31462ms step_avg:163.02ms
step:204/1530 train_loss:4.1903 train_time:31626ms step_avg:163.02ms
step:205/1530 train_loss:4.2513 train_time:31789ms step_avg:163.02ms
step:206/1530 train_loss:3.9446 train_time:31950ms step_avg:163.01ms
step:207/1530 train_loss:4.0839 train_time:32114ms step_avg:163.02ms
step:208/1530 train_loss:4.1009 train_time:32276ms step_avg:163.01ms
step:209/1530 train_loss:4.2430 train_time:32439ms step_avg:163.01ms
step:210/1530 train_loss:4.1899 train_time:32603ms step_avg:163.02ms
step:211/1530 train_loss:4.0690 train_time:32765ms step_avg:163.01ms
step:212/1530 train_loss:4.1349 train_time:32927ms step_avg:163.00ms
step:213/1530 train_loss:4.0561 train_time:33090ms step_avg:163.01ms
step:214/1530 train_loss:4.1174 train_time:33252ms step_avg:163.00ms
step:215/1530 train_loss:3.9589 train_time:33415ms step_avg:163.00ms
step:216/1530 train_loss:4.0097 train_time:33580ms step_avg:163.01ms
step:217/1530 train_loss:4.0115 train_time:33744ms step_avg:163.02ms
step:218/1530 train_loss:4.0901 train_time:33906ms step_avg:163.01ms
step:219/1530 train_loss:4.0819 train_time:34068ms step_avg:163.01ms
step:220/1530 train_loss:4.0892 train_time:34230ms step_avg:163.00ms
step:221/1530 train_loss:4.1052 train_time:34393ms step_avg:163.00ms
step:222/1530 train_loss:4.0016 train_time:34556ms step_avg:163.00ms
step:223/1530 train_loss:3.9906 train_time:34720ms step_avg:163.00ms
step:224/1530 train_loss:4.3012 train_time:34883ms step_avg:163.00ms
step:225/1530 train_loss:3.9274 train_time:35046ms step_avg:163.00ms
step:226/1530 train_loss:3.9980 train_time:35207ms step_avg:163.00ms
step:227/1530 train_loss:3.9856 train_time:35370ms step_avg:162.99ms
step:228/1530 train_loss:4.1430 train_time:35534ms step_avg:163.00ms
step:229/1530 train_loss:3.9268 train_time:35703ms step_avg:163.03ms
step:230/1530 train_loss:4.0414 train_time:35867ms step_avg:163.03ms
step:231/1530 train_loss:3.9026 train_time:36033ms step_avg:163.05ms
step:232/1530 train_loss:3.9736 train_time:36200ms step_avg:163.06ms
step:233/1530 train_loss:4.0946 train_time:36365ms step_avg:163.07ms
step:234/1530 train_loss:4.0330 train_time:36531ms step_avg:163.08ms
step:235/1530 train_loss:3.9035 train_time:36698ms step_avg:163.10ms
step:236/1530 train_loss:4.0828 train_time:36864ms step_avg:163.11ms
step:237/1530 train_loss:4.0794 train_time:37029ms step_avg:163.12ms
step:238/1530 train_loss:3.9471 train_time:37194ms step_avg:163.13ms
step:239/1530 train_loss:4.0778 train_time:37361ms step_avg:163.15ms
step:240/1530 train_loss:4.1053 train_time:37526ms step_avg:163.16ms
step:241/1530 train_loss:3.9618 train_time:37691ms step_avg:163.17ms
step:242/1530 train_loss:4.1398 train_time:37858ms step_avg:163.18ms
step:243/1530 train_loss:4.0138 train_time:38024ms step_avg:163.19ms
step:244/1530 train_loss:4.0919 train_time:38191ms step_avg:163.21ms
step:245/1530 train_loss:4.1469 train_time:38356ms step_avg:163.22ms
step:246/1530 train_loss:4.0544 train_time:38522ms step_avg:163.23ms
step:247/1530 train_loss:4.0083 train_time:38688ms step_avg:163.24ms
step:248/1530 train_loss:4.1088 train_time:38853ms step_avg:163.25ms
step:249/1530 train_loss:3.9180 train_time:39018ms step_avg:163.26ms
step:250/1530 train_loss:3.9794 train_time:39185ms step_avg:163.27ms
step:250/1530 val_loss:4.0048 train_time:39233ms step_avg:163.47ms
step:251/1530 train_loss:4.0773 train_time:39355ms step_avg:163.30ms
step:252/1530 train_loss:4.1665 train_time:39522ms step_avg:163.32ms
step:253/1530 train_loss:3.9326 train_time:39690ms step_avg:163.33ms
step:254/1530 train_loss:3.8853 train_time:39856ms step_avg:163.34ms
step:255/1530 train_loss:4.0790 train_time:40021ms step_avg:163.35ms
step:256/1530 train_loss:3.9892 train_time:40188ms step_avg:163.37ms
step:257/1530 train_loss:3.9902 train_time:40353ms step_avg:163.37ms
step:258/1530 train_loss:3.9855 train_time:40518ms step_avg:163.38ms
step:259/1530 train_loss:4.0317 train_time:40686ms step_avg:163.40ms
step:260/1530 train_loss:4.0596 train_time:40852ms step_avg:163.41ms
step:261/1530 train_loss:4.0294 train_time:41019ms step_avg:163.42ms
step:262/1530 train_loss:4.0018 train_time:41186ms step_avg:163.44ms
step:263/1530 train_loss:3.8935 train_time:41352ms step_avg:163.45ms
step:264/1530 train_loss:3.9906 train_time:41518ms step_avg:163.46ms
step:265/1530 train_loss:3.8787 train_time:41685ms step_avg:163.47ms
step:266/1530 train_loss:3.9203 train_time:41851ms step_avg:163.48ms
step:267/1530 train_loss:3.9278 train_time:42016ms step_avg:163.49ms
step:268/1530 train_loss:3.9641 train_time:42184ms step_avg:163.50ms
step:269/1530 train_loss:3.8592 train_time:42350ms step_avg:163.51ms
step:270/1530 train_loss:4.1103 train_time:42515ms step_avg:163.52ms
step:271/1530 train_loss:3.9786 train_time:42681ms step_avg:163.53ms
step:272/1530 train_loss:3.9279 train_time:42847ms step_avg:163.54ms
step:273/1530 train_loss:3.9432 train_time:43013ms step_avg:163.55ms
step:274/1530 train_loss:4.0368 train_time:43180ms step_avg:163.56ms
step:275/1530 train_loss:4.0618 train_time:43346ms step_avg:163.57ms
step:276/1530 train_loss:4.2329 train_time:43511ms step_avg:163.57ms
step:277/1530 train_loss:4.0466 train_time:43678ms step_avg:163.59ms
step:278/1530 train_loss:4.0919 train_time:43845ms step_avg:163.60ms
step:279/1530 train_loss:3.9999 train_time:44010ms step_avg:163.61ms
step:280/1530 train_loss:4.1662 train_time:44179ms step_avg:163.62ms
step:281/1530 train_loss:3.9766 train_time:44346ms step_avg:163.64ms
step:282/1530 train_loss:3.9512 train_time:44512ms step_avg:163.65ms
step:283/1530 train_loss:3.9062 train_time:44677ms step_avg:163.65ms
step:284/1530 train_loss:4.0448 train_time:44845ms step_avg:163.67ms
step:285/1530 train_loss:4.0579 train_time:45010ms step_avg:163.67ms
step:286/1530 train_loss:4.0897 train_time:45175ms step_avg:163.68ms
step:287/1530 train_loss:3.9056 train_time:45340ms step_avg:163.68ms
step:288/1530 train_loss:4.0079 train_time:45505ms step_avg:163.69ms
step:289/1530 train_loss:3.8800 train_time:45670ms step_avg:163.69ms
step:290/1530 train_loss:3.8575 train_time:45835ms step_avg:163.70ms
step:291/1530 train_loss:3.9076 train_time:46000ms step_avg:163.70ms
step:292/1530 train_loss:3.8643 train_time:46166ms step_avg:163.71ms
step:293/1530 train_loss:3.8972 train_time:46330ms step_avg:163.71ms
step:294/1530 train_loss:3.9297 train_time:46495ms step_avg:163.71ms
step:295/1530 train_loss:3.8464 train_time:46660ms step_avg:163.72ms
step:296/1530 train_loss:3.8647 train_time:46826ms step_avg:163.73ms
step:297/1530 train_loss:3.8713 train_time:46991ms step_avg:163.73ms
step:298/1530 train_loss:3.9774 train_time:47155ms step_avg:163.73ms
step:299/1530 train_loss:3.8262 train_time:47322ms step_avg:163.75ms
step:300/1530 train_loss:3.9650 train_time:47488ms step_avg:163.75ms
step:301/1530 train_loss:3.9599 train_time:47652ms step_avg:163.75ms
step:302/1530 train_loss:3.9303 train_time:47816ms step_avg:163.75ms
step:303/1530 train_loss:3.9777 train_time:47982ms step_avg:163.76ms
step:304/1530 train_loss:3.9669 train_time:48147ms step_avg:163.77ms
step:305/1530 train_loss:4.4549 train_time:48312ms step_avg:163.77ms
step:306/1530 train_loss:3.9415 train_time:48477ms step_avg:163.78ms
step:307/1530 train_loss:3.8356 train_time:48643ms step_avg:163.78ms
step:308/1530 train_loss:3.9746 train_time:48809ms step_avg:163.79ms
step:309/1530 train_loss:3.8626 train_time:48972ms step_avg:163.79ms
step:310/1530 train_loss:4.0758 train_time:49138ms step_avg:163.79ms
step:311/1530 train_loss:3.9306 train_time:49305ms step_avg:163.80ms
step:312/1530 train_loss:3.8664 train_time:49470ms step_avg:163.81ms
step:313/1530 train_loss:3.9354 train_time:49635ms step_avg:163.81ms
step:314/1530 train_loss:4.0609 train_time:49801ms step_avg:163.82ms
step:315/1530 train_loss:3.9454 train_time:49967ms step_avg:163.83ms
step:316/1530 train_loss:3.7971 train_time:50131ms step_avg:163.83ms
step:317/1530 train_loss:3.8822 train_time:50295ms step_avg:163.83ms
step:318/1530 train_loss:3.9294 train_time:50461ms step_avg:163.83ms
step:319/1530 train_loss:3.8863 train_time:50625ms step_avg:163.84ms
step:320/1530 train_loss:4.0136 train_time:50791ms step_avg:163.84ms
step:321/1530 train_loss:3.9598 train_time:50956ms step_avg:163.85ms
step:322/1530 train_loss:3.9306 train_time:51122ms step_avg:163.85ms
step:323/1530 train_loss:4.0060 train_time:51288ms step_avg:163.86ms
step:324/1530 train_loss:3.9518 train_time:51453ms step_avg:163.86ms
step:325/1530 train_loss:4.0078 train_time:51618ms step_avg:163.87ms
step:326/1530 train_loss:3.8903 train_time:51784ms step_avg:163.87ms
step:327/1530 train_loss:4.3917 train_time:51949ms step_avg:163.88ms
step:328/1530 train_loss:4.0704 train_time:52113ms step_avg:163.88ms
step:329/1530 train_loss:3.8006 train_time:52278ms step_avg:163.88ms
step:330/1530 train_loss:3.7540 train_time:52444ms step_avg:163.89ms
step:331/1530 train_loss:3.9757 train_time:52609ms step_avg:163.89ms
step:332/1530 train_loss:3.9123 train_time:52774ms step_avg:163.89ms
step:333/1530 train_loss:3.8749 train_time:52939ms step_avg:163.90ms
step:334/1530 train_loss:3.8423 train_time:53104ms step_avg:163.90ms
step:335/1530 train_loss:4.0126 train_time:53270ms step_avg:163.91ms
step:336/1530 train_loss:3.9528 train_time:53433ms step_avg:163.91ms
step:337/1530 train_loss:4.4263 train_time:53598ms step_avg:163.91ms
step:338/1530 train_loss:3.9294 train_time:53765ms step_avg:163.92ms
step:339/1530 train_loss:3.8546 train_time:53929ms step_avg:163.92ms
step:340/1530 train_loss:3.9366 train_time:54093ms step_avg:163.92ms
step:341/1530 train_loss:3.8557 train_time:54261ms step_avg:163.93ms
step:342/1530 train_loss:3.8060 train_time:54428ms step_avg:163.94ms
step:343/1530 train_loss:3.8354 train_time:54597ms step_avg:163.95ms
step:344/1530 train_loss:3.9930 train_time:54765ms step_avg:163.97ms
step:345/1530 train_loss:3.8249 train_time:54932ms step_avg:163.98ms
step:346/1530 train_loss:3.7617 train_time:55100ms step_avg:163.99ms
step:347/1530 train_loss:3.7903 train_time:55270ms step_avg:164.00ms
step:348/1530 train_loss:3.8634 train_time:55437ms step_avg:164.01ms
step:349/1530 train_loss:3.8333 train_time:55606ms step_avg:164.03ms
step:350/1530 train_loss:3.5679 train_time:55773ms step_avg:164.04ms
step:351/1530 train_loss:3.8329 train_time:55942ms step_avg:164.05ms
step:352/1530 train_loss:4.1803 train_time:56109ms step_avg:164.06ms
step:353/1530 train_loss:3.6595 train_time:56277ms step_avg:164.07ms
step:354/1530 train_loss:3.9283 train_time:56444ms step_avg:164.08ms
step:355/1530 train_loss:3.7846 train_time:56612ms step_avg:164.09ms
step:356/1530 train_loss:3.8858 train_time:56781ms step_avg:164.11ms
step:357/1530 train_loss:3.7549 train_time:56949ms step_avg:164.12ms
step:358/1530 train_loss:3.8644 train_time:57117ms step_avg:164.13ms
step:359/1530 train_loss:3.8049 train_time:57287ms step_avg:164.15ms
step:360/1530 train_loss:3.4206 train_time:57456ms step_avg:164.16ms
step:361/1530 train_loss:4.0106 train_time:57625ms step_avg:164.17ms
step:362/1530 train_loss:3.9113 train_time:57793ms step_avg:164.18ms
step:363/1530 train_loss:3.8375 train_time:57960ms step_avg:164.19ms
step:364/1530 train_loss:3.7442 train_time:58128ms step_avg:164.20ms
step:365/1530 train_loss:3.9119 train_time:58296ms step_avg:164.22ms
step:366/1530 train_loss:3.8526 train_time:58466ms step_avg:164.23ms
step:367/1530 train_loss:3.8538 train_time:58632ms step_avg:164.23ms
step:368/1530 train_loss:3.8506 train_time:58799ms step_avg:164.24ms
step:369/1530 train_loss:3.7499 train_time:58969ms step_avg:164.26ms
step:370/1530 train_loss:3.8802 train_time:59135ms step_avg:164.26ms
step:371/1530 train_loss:3.7301 train_time:59303ms step_avg:164.27ms
step:372/1530 train_loss:3.6863 train_time:59471ms step_avg:164.29ms
step:373/1530 train_loss:3.9118 train_time:59638ms step_avg:164.29ms
step:374/1530 train_loss:3.8292 train_time:59806ms step_avg:164.30ms
step:375/1530 train_loss:3.7962 train_time:59974ms step_avg:164.31ms
step:375/1530 val_loss:3.8255 train_time:60022ms step_avg:164.45ms