Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

some fixes #9

Merged
merged 2 commits into the base branch from the source branch
Apr 22, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions include/tvm/runtime/vm.h
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,7 @@ struct Instruction {
/*!
* \brief Construct an allocate tensor instruction with constant shape.
* \param storage The storage to allocate out of.
* \param offset The offset into the storage to allocate from.
* \param shape The shape of the tensor.
* \param dtype The dtype of the tensor.
* \param dst The destination register.
Expand Down
2 changes: 1 addition & 1 deletion python/tvm/relay/transform/memory_alloc.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ def dynamic_invoke(self, scope, func, ins, new_args, out_types, ret_type):
size = self.compute_storage_in_relay(
out_shape, out_type.dtype)
alignment = self.compute_alignment(out_type.dtype)
sto = scope.let("storage_{i}".format(i=i), self.alloc_storage(
sto = scope.let("storage_{i}".format(i=i), alloc_storage(
size, alignment, self.default_context, out_type.dtype))
storages.append(sto)

Expand Down
5 changes: 1 addition & 4 deletions src/relay/backend/vm/compiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -906,8 +906,6 @@ transform::Sequential MemoryOpt(tvm::Target host_target) {

// Perform memory planning in order to coalesce/reduce allocations.
pass_seqs.push_back(transform::MemoryPlan());
// Compute away possibly introduced constant computation.
pass_seqs.push_back(transform::FoldConstant());

return transform::Sequential(pass_seqs);
}
Expand Down Expand Up @@ -965,8 +963,7 @@ IRModule VMCompiler::OptimizeModule(const IRModule& mod, const TargetsMap& targe
pass_seqs.push_back(transform::LambdaLift());
pass_seqs.push_back(transform::InlinePrimitives());



// Memory optimization
pass_seqs.push_back(MemoryOpt(this->target_host_));

transform::Sequential seq(pass_seqs);
Expand Down
6 changes: 3 additions & 3 deletions src/runtime/vm/executable.cc
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ VMInstructionSerializer SerializeInstruction(const Instruction& instr) {
break;
}
case Opcode::AllocTensor: {
// Number of fields = 6 + instr.alloc_tensor.ndim
// Number of fields = 7 + instr.alloc_tensor.ndim
fields.push_back(instr.alloc_tensor.storage);
fields.push_back(instr.alloc_tensor.offset);
// Save `DLDataType` and the dst register.
Expand Down Expand Up @@ -565,7 +565,7 @@ Instruction DeserializeInstruction(const VMInstructionSerializer& instr) {
return Instruction::InvokePacked(packed_index, arity, output_size, args);
}
case Opcode::AllocTensor: {
// Number of fields = 6 + instr.alloc_tensor.ndim
// Number of fields = 7 + instr.alloc_tensor.ndim
DCHECK_GE(instr.fields.size(), 7U);
DCHECK_EQ(instr.fields.size(), 7U + static_cast<size_t>(instr.fields[4]));

Expand All @@ -580,7 +580,7 @@ Instruction DeserializeInstruction(const VMInstructionSerializer& instr) {
Index ndim = instr.fields[5];
RegName dst = instr.fields[6];

std::vector<Index> shape = ExtractFields(instr.fields, 6, ndim);
std::vector<Index> shape = ExtractFields(instr.fields, 7, ndim);

return Instruction::AllocTensor(storage_reg, offset, shape, dtype, dst);
}
Expand Down