allow floats for ff_mult
lucidrains committed Jan 12, 2021
1 parent 208d70d commit b82afd7
Showing 2 changed files with 5 additions and 4 deletions.
setup.py — 2 changes: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 setup(
   name = 'x-transformers',
   packages = find_packages(exclude=['examples']),
-  version = '0.7.2',
+  version = '0.7.3',
   license='MIT',
   description = 'X-Transformers - Pytorch',
   author = 'Phil Wang',
x_transformers/x_transformers.py — 7 changes: 4 additions & 3 deletions
@@ -197,16 +197,17 @@ def forward(self, x):
 class FeedForward(nn.Module):
     def __init__(self, dim, dim_out = None, mult = 4, glu = False, dropout = 0.):
         super().__init__()
+        inner_dim = int(dim * mult)
         dim_out = default(dim_out, dim)
         project_in = nn.Sequential(
-            nn.Linear(dim, dim * mult),
+            nn.Linear(dim, inner_dim),
             nn.GELU()
-        ) if not glu else GEGLU(dim, dim * mult)
+        ) if not glu else GEGLU(dim, inner_dim)
 
         self.net = nn.Sequential(
             project_in,
             nn.Dropout(dropout),
-            nn.Linear(dim * mult, dim_out)
+            nn.Linear(inner_dim, dim_out)
         )
 
     def forward(self, x):
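Why the new int(...) cast matters — a minimal sketch (the dims, tensor, and values below are illustrative, not part of the commit):

import torch
import torch.nn as nn

dim, mult = 512, 2.5          # hypothetical values; mult may now be a float
inner_dim = int(dim * mult)   # 1280 — nn.Linear needs an integer feature size

ff_in = nn.Linear(dim, inner_dim)
out = ff_in(torch.randn(2, 16, dim))
print(out.shape)              # torch.Size([2, 16, 1280])

# Before this commit, nn.Linear(dim, dim * mult) received the float
# 1280.0 directly and would typically fail with a TypeError when the
# layer allocated its weight.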

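For context, the GEGLU branch in the diff consumes the same inner_dim. A sketch of that module, as it appears elsewhere in x_transformers.py around this version (reconstructed from the public source, so treat the exact body as an assumption rather than part of this diff):

import torch.nn as nn
import torch.nn.functional as F

class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        # project to twice the target width, then gate one half with GELU of the other
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim = -1)
        return x * F.gelu(gate)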