Android-x86

external-swiftshader: Commit

external/swiftshader


Commit MetaInfo

Revision: 0b77aa5e8d9ec5bb1bd8484159b26c767cf90818 (tree)
Time: 2020-04-14 18:43:47
Author: Nicolas Capens <capn@goog...>
Committer: Nicolas Capens

Log Message

Separate Operand store logic from EmitStore()

The new Store() helper function can store Operand instances independently
of SPIR-V instructions. This allows reuse of this logic for other
instructions that need to store to memory, like Modf and Frexp.

Bug: b/153641251
Change-Id: I453bb7cd24ba26b9a23d73568dc3374a52a36073
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/43695
Presubmit-Ready: Nicolas Capens <nicolascapens@google.com>
Kokoro-Result: kokoro <noreply+kokoro@google.com>
Tested-by: Nicolas Capens <nicolascapens@google.com>
Reviewed-by: Ben Clayton <bclayton@google.com>
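
As a rough, self-contained illustration of the pattern this change applies (all names below are invented for the example and are not SwiftShader's API): the instruction-specific decoding stays in the Emit*() handler, while the operand-store logic moves into a helper that takes no instruction at all, so other handlers can call it with an operand they built themselves.

// Minimal, hypothetical sketch of the refactoring pattern; not SwiftShader code.
#include <cstdint>
#include <iostream>
#include <vector>

struct Operand            // stand-in for a resolved operand value
{
	std::vector<uint32_t> words;
};

struct Instruction        // stand-in for an InsnIterator
{
	uint32_t pointerId;
	Operand value;
};

class Emitter
{
public:
	// Instruction-specific handler: decodes the instruction, then delegates.
	void EmitStore(const Instruction &insn)
	{
		Store(insn.pointerId, insn.value);
	}

	// Hypothetical handler for an instruction like OpModf, which produces a
	// value itself and must write it through a pointer operand: it can reuse
	// Store() without faking an OpStore instruction.
	void EmitModfLike(uint32_t wholePartPointerId, const Operand &wholePart)
	{
		Store(wholePartPointerId, wholePart);
	}

private:
	// Operand-store logic shared by every instruction that writes to memory.
	void Store(uint32_t pointerId, const Operand &value)
	{
		std::cout << "store " << value.words.size()
		          << " word(s) through pointer %" << pointerId << "\n";
	}
};

int main()
{
	Emitter emitter;
	emitter.EmitStore({ /*pointerId=*/1, Operand{ { 42u } } });
	emitter.EmitModfLike(/*wholePartPointerId=*/2, Operand{ { 7u } });
}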

Change Summary

Incremental Difference

--- a/src/Pipeline/SpirvShader.hpp
+++ b/src/Pipeline/SpirvShader.hpp
@@ -1175,6 +1175,10 @@ private:
 	void EvalSpecConstantUnaryOp(InsnIterator insn);
 	void EvalSpecConstantBinaryOp(InsnIterator insn);
 
+	// Helper for implementing OpStore, which doesn't take an InsnIterator so it
+	// can also store independent operands.
+	void Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder, EmitState *state) const;
+
 	// LoadPhi loads the phi values from the alloca storage and places the
 	// load values into the intermediate with the phi's result id.
 	void LoadPhi(InsnIterator insn, EmitState *state) const;
--- a/src/Pipeline/SpirvShaderMemory.cpp
+++ b/src/Pipeline/SpirvShaderMemory.cpp
@@ -71,10 +71,6 @@ SpirvShader::EmitResult SpirvShader::EmitStore(InsnIterator insn, EmitState *sta
 	bool atomic = (insn.opcode() == spv::OpAtomicStore);
 	Object::ID pointerId = insn.word(1);
 	Object::ID objectId = insn.word(atomic ? 4 : 2);
-	auto &object = getObject(objectId);
-	auto &pointer = getObject(pointerId);
-	auto &pointerTy = getType(pointer);
-	auto &elementTy = getType(pointerTy.element);
 	std::memory_order memoryOrder = std::memory_order_relaxed;
 
 	if(atomic)
@@ -84,6 +80,19 @@ SpirvShader::EmitResult SpirvShader::EmitStore(InsnIterator insn, EmitState *sta
 		memoryOrder = MemoryOrder(memorySemantics);
 	}
 
+	const auto &value = Operand(this, state, objectId);
+
+	Store(pointerId, value, atomic, memoryOrder, state);
+
+	return EmitResult::Continue;
+}
+
+void SpirvShader::Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder, EmitState *state) const
+{
+	auto &pointer = getObject(pointerId);
+	auto &pointerTy = getType(pointer);
+	auto &elementTy = getType(pointerTy.element);
+
 	ASSERT(!atomic || elementTy.opcode() == spv::OpTypeInt); // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."
 
 	auto ptr = GetPointerToData(pointerId, 0, state);
@@ -96,28 +105,11 @@ SpirvShader::EmitResult SpirvShader::EmitStore(InsnIterator insn, EmitState *sta
 		mask = mask & state->storesAndAtomicsMask();
 	}
 
-	if(object.kind == Object::Kind::Constant)
-	{
-		// Constant source data.
-		const uint32_t *src = object.constantValue.data();
-		VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
-			auto p = ptr + el.offset;
-			if(interleavedByLane) { p = InterleaveByLane(p); }
-			p.Store(SIMD::Int(src[el.index]), robustness, mask, atomic, memoryOrder);
-		});
-	}
-	else
-	{
-		// Intermediate source data.
-		auto &src = state->getIntermediate(objectId);
-		VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
-			auto p = ptr + el.offset;
-			if(interleavedByLane) { p = InterleaveByLane(p); }
-			p.Store(src.Float(el.index), robustness, mask, atomic, memoryOrder);
-		});
-	}
-
-	return EmitResult::Continue;
+	VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
+		auto p = ptr + el.offset;
+		if(interleavedByLane) { p = InterleaveByLane(p); }
+		p.Store(value.Float(el.index), robustness, mask, atomic, memoryOrder);
+	});
 }
 
 SpirvShader::EmitResult SpirvShader::EmitVariable(InsnIterator insn, EmitState *state) const
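
One detail in the retained code: the atomic path converts the instruction's SPIR-V memory-semantics operand into a std::memory_order via MemoryOrder() before it is threaded through to Store(). The sketch below is only a simplified illustration of that idea, assuming the ordering bit values from the SPIR-V specification; it is not SwiftShader's MemoryOrder() implementation.

#include <atomic>
#include <cstdint>

// Hypothetical translation of SPIR-V MemorySemantics ordering bits into a
// std::memory_order. The bit values match the SPIR-V specification; the
// selection logic is an illustration only.
enum MemorySemanticsMask : uint32_t
{
	MemorySemanticsAcquireMask = 0x2,
	MemorySemanticsReleaseMask = 0x4,
	MemorySemanticsAcquireReleaseMask = 0x8,
	MemorySemanticsSequentiallyConsistentMask = 0x10,
};

std::memory_order ToMemoryOrder(uint32_t semantics)
{
	if(semantics & MemorySemanticsSequentiallyConsistentMask) return std::memory_order_seq_cst;
	if(semantics & MemorySemanticsAcquireReleaseMask) return std::memory_order_acq_rel;
	if(semantics & MemorySemanticsReleaseMask) return std::memory_order_release;
	if(semantics & MemorySemanticsAcquireMask) return std::memory_order_acquire;
	return std::memory_order_relaxed;  // no ordering bits set, as in the non-atomic path
}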