
Commit f4b2d46

fix
1 parent: eb92824

2 files changed: 2 additions & 2 deletions

2 files changed

+2
-2
lines changed

docs/diffusion/stable_diffusion/model/unet_attention.html

Lines changed: 1 addition & 1 deletion
@@ -602,7 +602,7 @@ <h3>Cross Attention Layer</h3>
 <span class="lineno">173</span> <span class="n">k</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">to_k</span><span class="p">(</span><span class="n">cond</span><span class="p">)</span>
 <span class="lineno">174</span> <span class="n">v</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">to_v</span><span class="p">(</span><span class="n">cond</span><span class="p">)</span>
 <span class="lineno">175</span>
-<span class="lineno">176</span> <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;use flash&#39;</span><span class="p">,</span> <span class="n">CrossAttention</span><span class="o">.</span><span class="n">use_flash_attention</span><span class="p">)</span>
+<span class="lineno">176</span> <span class="nb">print</span><span class="p">(</span><span class="s1">&#39;use flash&#39;</span><span class="p">,</span> <span class="n">CrossAttention</span><span class="o">.</span><span class="n">use_flash_attention</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">flash</span><span class="p">)</span>
 <span class="lineno">177</span>
 <span class="lineno">178</span> <span class="k">if</span> <span class="n">CrossAttention</span><span class="o">.</span><span class="n">use_flash_attention</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">flash</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">cond</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">d_head</span> <span class="o">&lt;=</span> <span class="mi">128</span><span class="p">:</span>
 <span class="lineno">179</span> <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">flash_attention</span><span class="p">(</span><span class="n">q</span><span class="p">,</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span>

labml_nn/diffusion/stable_diffusion/model/unet_attention.py

Lines changed: 1 addition & 1 deletion
@@ -173,7 +173,7 @@ def forward(self, x: torch.Tensor, cond: Optional[torch.Tensor] = None):
         k = self.to_k(cond)
         v = self.to_v(cond)
 
-        print('use flash', CrossAttention.use_flash_attention)
+        print('use flash', CrossAttention.use_flash_attention, self.flash)
 
         if CrossAttention.use_flash_attention and self.flash is not None and cond is None and self.d_head <= 128:
             return self.flash_attention(q, k, v)
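For context on the gate being debugged: use_flash_attention is a class-level flag shared by every CrossAttention instance, while self.flash is a per-instance module that stays None when the flash-attn package is unavailable, so printing both shows which of the two is blocking the fast path. A minimal usage sketch, assuming labml_nn's CrossAttention constructor signature; the dimension values below are illustrative, not taken from this commit:

import torch

from labml_nn.diffusion.stable_diffusion.model.unet_attention import CrossAttention

# Class attribute: flipping it affects all CrossAttention instances at once.
CrossAttention.use_flash_attention = True

# Assumed constructor arguments (d_model, d_cond, n_heads, d_head).
attn = CrossAttention(d_model=320, d_cond=768, n_heads=8, d_head=40)
x = torch.randn(1, 4096, 320)

# Self-attention call (no cond argument): the flash path is taken only when
# the class flag is set, self.flash was successfully built, and d_head <= 128.
out = attn(x)
print(out.shape)  # torch.Size([1, 4096, 320])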
