razmars committed
Commit 9a54b60 · verified · 1 Parent(s): b34e6b8

Update modeling_super_linear.py

Files changed (1)
  1. modeling_super_linear.py +68 -38
modeling_super_linear.py CHANGED
@@ -191,49 +191,79 @@ class NLinear(nn.Module):
 	return x+seq_last # to [Batch, Output length, Channel]
 
 
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
 class RLinear(nn.Module):
-    def __init__(self, input_len, output_len):
-        super(RLinear, self).__init__()
-        self.Linear = nn.Linear(input_len, output_len)
-        self.seq_len = input_len
-        self.horizon = output_len
-        self.revin_layer = RevIN(num_features = None, affine=False, norm_type = None, subtract_last = False)
-
-    def forward(self, x):
-        # x: [Batch, Input length, Channel]
-        x_shape = x.shape
-        if len(x_shape) == 2:
-            x = x.unsqueeze(-1)
-
-        B, L, V = x.shape
-        if L < self.seq_len:
-            in_features = L
-            W = self.Linear.weight.detach()
-            fixed_weights = W[:, :L]
-            dynamic_weights = W[:, L:]
-
-            if in_features != W.size(1):
-                dynamic_weights = F.interpolate(dynamic_weights.unsqueeze(0).unsqueeze(0), size=(self.horizon, in_features-self.seq_len), mode='bilinear', align_corners=False).squeeze(0).squeeze(0)
-            if self.fixed_in != 0:
-                fixed_weights = F.interpolate(fixed_weights.unsqueeze(0).unsqueeze(0), size=(self.horizon, L), mode='bilinear', align_corners=False).squeeze(0).squeeze(0)
-
-            x = self.revin_layer(x, 'norm')
-            x = F.linear(x, torch.cat((fixed_weights, dynamic_weights), dim=1))
-            x = self.revin_layer(x, 'denorm')
-            if len(x_shape) == 2:
-                x = x.squeeze(-1)
-            return x
-
-        x = x.clone()
-        x = self.revin_layer(x, 'norm')
-        x = self.Linear(x.permute(0,2,1)).permute(0,2,1).clone()
-        x = self.revin_layer(x, 'denorm')
-
-        if len(x_shape) == 2:
-            x = x.squeeze(-1)
-        return x # to [Batch, Output length, Channel]
+    """
+    Linear projection from a variable-length input (L) to a fixed horizon,
+    applied *independently per channel* and wrapped with RevIN.
+    """
+    def __init__(self, input_len: int, output_len: int):
+        super().__init__()
+        self.seq_len = input_len          # "design" input length
+        self.horizon = output_len
+
+        # bias=False: the projection is defined entirely by the (resizable) weight
+        self.linear = nn.Linear(input_len, output_len, bias=False)
+
+        # RevIN layer (defined elsewhere in this file)
+        self.revin = RevIN(num_features=None, affine=False,
+                           norm_type=None, subtract_last=False)
+
+    # ------------------------------------------------------------------ helpers
+    def _resize_weight(self, weight: torch.Tensor, new_in: int) -> torch.Tensor:
+        """
+        Bilinearly interpolate columns so the weight becomes (horizon, new_in).
+        """
+        if new_in == weight.shape[1]:
+            return weight                                   # nothing to do
+        w4d = weight.unsqueeze(0).unsqueeze(0)              # (1, 1, out, in)
+        w_resized = F.interpolate(
+            w4d,
+            size=(self.horizon, new_in),                    # new_in is the actual input length, so the target size is always valid
+            mode="bilinear",
+            align_corners=False
+        )[0, 0]                                             # back to (out, new_in)
+        return w_resized
+
+    # ------------------------------------------------------------------ forward
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """
+        x: (B, L, C) or (B, L)  →  (B, horizon, C) or (B, horizon)
+        """
+        squeeze_last = False
+        if x.dim() == 2:                                    # (B, L)
+            x = x.unsqueeze(-1)                             # (B, L, 1)
+            squeeze_last = True
+
+        B, L, C = x.shape
+
+        # ---------- RevIN normalisation ----------
+        x = self.revin(x, "norm")
+
+        if L == self.seq_len:                               # fast path: no resizing
+            x = self.linear(x.permute(0, 2, 1))             # (B, C, horizon)
+            x = x.permute(0, 2, 1)                          # (B, horizon, C)
+
+        else:                                               # resize the weight once
+            W = self._resize_weight(self.linear.weight.detach(), L)  # (out, L)
+
+            # project each channel separately: (B, C, L) @ (L, out) -> (B, C, out)
+            x = x.permute(0, 2, 1)                          # (B, C, L)
+            x = torch.matmul(x, W.t())                      # (B, C, out)
+            x = x.permute(0, 2, 1)                          # (B, horizon, C)
+
+        # ---------- RevIN denormalisation ----------
+        x = self.revin(x, "denorm")
+
+        if squeeze_last:
+            x = x.squeeze(-1)                               # (B, horizon)
+
+        return x
 
 "-------------------------------------------------------------------------------------------------------------------"
 class SparseNoisyMoE(nn.Module):
     def __init__(self, configs, experts=None):
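
Usage sketch (not part of the commit): assuming the updated RLinear and the file's own RevIN are importable from modeling_super_linear (the import path is an assumption), the forward pass should accept both the design-length input and shorter histories:

# Hypothetical usage example; import path and tensor sizes are illustrative only.
import torch
from modeling_super_linear import RLinear

model = RLinear(input_len=96, output_len=24)

x_full = torch.randn(8, 96, 7)    # (B, L, C) with L == seq_len -> fast path through nn.Linear
print(model(x_full).shape)        # torch.Size([8, 24, 7])

x_short = torch.randn(8, 48, 7)   # L != seq_len -> weight is bilinearly resized to (24, 48)
print(model(x_short).shape)       # torch.Size([8, 24, 7])

x_2d = torch.randn(8, 96)         # (B, L) input is unsqueezed to one channel and squeezed back
print(model(x_2d).shape)          # torch.Size([8, 24])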