
Commit

Merge branch 'taichi-dev:master' into master
wanmeihuali authored Jul 23, 2023
2 parents bf521ed + c41eec1 commit e1c7811
Showing 58 changed files with 836 additions and 427 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/scripts/ti_build/alter.py
@@ -117,7 +117,7 @@ def enter_shell():
     if shell.name in ("pwsh.exe", "powershell.exe"):
         pwsh = Command(shell.exe)
         path = _write_ti_pwshrc()
-        pwsh("-Interactive", "-NoExit", "-File", str(path))
+        pwsh("-ExecutionPolicy", "Bypass", "-NoExit", "-File", str(path))
     elif shell.name == "cmd.exe":
         cmd = Command(shell.exe)
         cmd("/k", "set", "PROMPT=TaichiBuild $P$G")

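Note on the change above: -Interactive is replaced by -ExecutionPolicy Bypass, which lets the generated pwshrc script run in that session even when the machine's script execution policy would otherwise block -File. A minimal sketch of the equivalent invocation outside the build tool, with a hypothetical rc path:

    # Sketch only; the rc path below is hypothetical.
    import subprocess

    rc_path = r"C:\Temp\ti_pwshrc.ps1"
    subprocess.run(["pwsh", "-ExecutionPolicy", "Bypass", "-NoExit", "-File", rc_path])
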
2 changes: 1 addition & 1 deletion cpp_examples/aot_save.cpp
@@ -13,7 +13,7 @@ void aot_save(taichi::Arch arch) {

   // program.materialize_runtime();
   auto *root = new SNode(0, SNodeType::root);
-  auto *pointer = &root->dense(Axis(0), n, "");
+  auto *pointer = &root->dense(Axis(0), n);
   auto *place = &pointer->insert_children(SNodeType::place);
   place->dt = PrimitiveType::i32;
   program.add_snode_tree(std::unique_ptr<SNode>(root), /*compile_only=*/true);

5 changes: 2 additions & 3 deletions cpp_examples/autograd.cpp
@@ -90,11 +90,10 @@ void autograd() {
     }
   };

-  auto *snode =
-      &root->dense(Axis(0), n, "").insert_children(SNodeType::place);
+  auto *snode = &root->dense(Axis(0), n).insert_children(SNodeType::place);
   snode->dt = PrimitiveType::f32;
   snode->grad_info = std::make_unique<GradInfoPrimal>(
-      &root->dense(Axis(0), n, "").insert_children(SNodeType::place));
+      &root->dense(Axis(0), n).insert_children(SNodeType::place));
   snode->get_adjoint()->dt = PrimitiveType::f32;
   snode->get_adjoint()->grad_info = std::make_unique<GradInfoAdjoint>();
   return snode;

2 changes: 1 addition & 1 deletion cpp_examples/run_snode.cpp
@@ -48,7 +48,7 @@ void run_snode() {
   int n = 10;
   program.materialize_runtime();
   auto *root = new SNode(0, SNodeType::root);
-  auto *pointer = &root->pointer(Axis(0), n, "");
+  auto *pointer = &root->pointer(Axis(0), n);
   auto *place = &pointer->insert_children(SNodeType::place);
   place->dt = PrimitiveType::i32;
   program.add_snode_tree(std::unique_ptr<SNode>(root), /*compile_only=*/false);

6 changes: 4 additions & 2 deletions python/taichi/lang/_ndarray.py
@@ -3,7 +3,7 @@
 from taichi.lang import impl
 from taichi.lang.enums import Layout
 from taichi.lang.exception import TaichiIndexError
-from taichi.lang.util import cook_dtype, python_scope, to_numpy_type
+from taichi.lang.util import cook_dtype, get_traceback, python_scope, to_numpy_type
 from taichi.types import primitive_types
 from taichi.types.ndarray_type import NdarrayTypeMetadata
 from taichi.types.utils import is_real, is_signed
@@ -237,7 +237,9 @@ class ScalarNdarray(Ndarray):
     def __init__(self, dtype, arr_shape):
         super().__init__()
         self.dtype = cook_dtype(dtype)
-        self.arr = impl.get_runtime().prog.create_ndarray(self.dtype, arr_shape, layout=Layout.NULL, zero_fill=True)
+        self.arr = impl.get_runtime().prog.create_ndarray(
+            self.dtype, arr_shape, layout=Layout.NULL, zero_fill=True, dbg_info=_ti_core.DebugInfo(get_traceback())
+        )
         self.shape = tuple(self.arr.shape)
         self.element_type = dtype

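The pattern above captures the allocation-site traceback eagerly and stores it in a DebugInfo object, so a later runtime error can point back to the Python line that created the ndarray. A rough, self-contained model of the idea (DebugInfo here is a hypothetical stand-in, not _ti_core.DebugInfo):

    import traceback

    class DebugInfo:  # hypothetical stand-in for _ti_core.DebugInfo
        def __init__(self, tb: str):
            self.tb = tb  # creation-site traceback, kept for error reports

    def get_traceback() -> str:
        # Drop the last frame so the trace ends at the caller.
        return "".join(traceback.format_stack()[:-1])

    class Ndarray:
        def __init__(self, shape):
            self.shape = shape
            self.dbg_info = DebugInfo(get_traceback())  # captured at creation

    arr = Ndarray((4, 4))
    print(arr.dbg_info.tb)  # shows where arr was constructed
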
2 changes: 1 addition & 1 deletion python/taichi/lang/any_array.py
@@ -88,7 +88,7 @@ def subscript(self, i, j):
             ast_builder.expr_subscript(
                 self.arr.ptr,
                 make_expr_group(*indices),
-                impl.get_runtime().get_current_src_info(),
+                _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()),
             )
         )

4 changes: 2 additions & 2 deletions python/taichi/lang/ast/ast_transformer.py
@@ -988,7 +988,7 @@ def build_Attribute(ctx, node):
                 .expr_subscript(
                     node.value.ptr.ptr,
                     make_expr_group(keygroup.index(node.attr)),
-                    impl.get_runtime().get_current_src_info(),
+                    _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()),
                 )
             )
         else:
@@ -997,7 +997,7 @@ def build_Attribute(ctx, node):
                     node.value.ptr.ptr,
                     [make_expr_group(keygroup.index(ch)) for ch in node.attr],
                     (attr_len,),
-                    impl.get_runtime().get_current_src_info(),
+                    _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()),
                 )
             )
         else:

8 changes: 6 additions & 2 deletions python/taichi/lang/expr.py
@@ -15,11 +15,13 @@ class Expr(TaichiOperations):

     def __init__(self, *args, tb=None, dtype=None):
         self.tb = tb
+        self.ptr_type_checked = False
         if len(args) == 1:
             if isinstance(args[0], _ti_core.Expr):
                 self.ptr = args[0]
             elif isinstance(args[0], Expr):
                 self.ptr = args[0].ptr
+                self.ptr_type_checked = args[0].ptr_type_checked
                 self.tb = args[0].tb
             elif is_matrix_class(args[0]):
                 self.ptr = make_matrix(args[0].to_list()).ptr
@@ -38,8 +40,10 @@ def __init__(self, *args, tb=None, dtype=None):
         else:
             assert False
         if self.tb:
-            self.ptr.set_tb(self.tb)
-        self.ptr.type_check(impl.get_runtime().prog.config())
+            self.ptr.set_dbg_info(_ti_core.DebugInfo(self.tb))
+        if not self.ptr_type_checked:
+            self.ptr.type_check(impl.get_runtime().prog.config())
+            self.ptr_type_checked = True

     def is_tensor(self):
         return self.ptr.is_tensor()

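Previously, wrapping one Expr in another re-ran type_check on the same underlying pointer. The new ptr_type_checked flag is inherited from the wrapped expression, so the check runs at most once per pointer. A minimal sketch of the memoization idea, with simplified hypothetical names:

    class Expr:
        def __init__(self, src):
            self.ptr_type_checked = False
            if isinstance(src, Expr):
                self.ptr = src.ptr
                # Inherit the flag: the wrapped Expr was already checked.
                self.ptr_type_checked = src.ptr_type_checked
            else:
                self.ptr = src
            if not self.ptr_type_checked:
                self.type_check()
                self.ptr_type_checked = True

        def type_check(self):
            print("type check runs")  # stands in for the real, costly check

    a = Expr(42)  # prints once
    b = Expr(a)   # wraps a without a second check
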
15 changes: 9 additions & 6 deletions python/taichi/lang/impl.py
@@ -97,7 +97,9 @@ def expr_init(rhs):
     if hasattr(rhs, "_data_oriented"):
         return rhs
     return Expr(
-        get_runtime().compiling_callable.ast_builder().expr_var(Expr(rhs).ptr, get_runtime().get_current_src_info())
+        get_runtime()
+        .compiling_callable.ast_builder()
+        .expr_var(Expr(rhs).ptr, _ti_core.DebugInfo(get_runtime().get_current_src_info()))
     )
@@ -175,6 +177,7 @@ def validate_subscript_index(value, index):

 @taichi_scope
 def subscript(ast_builder, value, *_indices, skip_reordered=False):
+    dbg_info = _ti_core.DebugInfo(get_runtime().get_current_src_info())
     ast_builder = get_runtime().compiling_callable.ast_builder()
     # Directly evaluate in Python for non-Taichi types
     if not isinstance(
@@ -251,14 +254,14 @@ def subscript(ast_builder, value, *_indices, skip_reordered=False):
         )

         if isinstance(value, MatrixField):
-            return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, get_runtime().get_current_src_info()))
+            return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, dbg_info))
         if isinstance(value, StructField):
             entries = {k: subscript(ast_builder, v, *indices) for k, v in value._items}
             entries["__struct_methods"] = value.struct_methods
             return _IntermediateStruct(entries)
-        return Expr(ast_builder.expr_subscript(_var, indices_expr_group, get_runtime().get_current_src_info()))
+        return Expr(ast_builder.expr_subscript(_var, indices_expr_group, dbg_info))
     if isinstance(value, AnyArray):
-        return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, get_runtime().get_current_src_info()))
+        return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, dbg_info))
     assert isinstance(value, Expr)
     # Index into TensorType
     # value: IndexExpression with ret_type = TensorType
@@ -291,10 +294,10 @@ def subscript(ast_builder, value, *_indices, skip_reordered=False):
                 value.ptr,
                 multiple_indices,
                 return_shape,
-                get_runtime().get_current_src_info(),
+                dbg_info,
             )
         )
-    return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, get_runtime().get_current_src_info()))
+    return Expr(ast_builder.expr_subscript(value.ptr, indices_expr_group, dbg_info))


 class SrcInfoGuard:

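subscript() used to rebuild the source-info string at every return site; hoisting a single dbg_info to the top of the function builds it once per call and reuses it on every branch. A compact sketch of the hoisting pattern (helpers are hypothetical):

    def current_src_info() -> str:
        return "kernel.py:42"  # hypothetical source location

    class DebugInfo:  # hypothetical stand-in
        def __init__(self, tb: str):
            self.tb = tb

    def subscript(value, index):
        dbg_info = DebugInfo(current_src_info())  # built once per call
        if isinstance(value, dict):
            return value[index], dbg_info  # each branch reuses the same object
        return value[index], dbg_info
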
13 changes: 11 additions & 2 deletions python/taichi/lang/matrix.py
@@ -20,6 +20,7 @@
 from taichi.lang.field import Field, ScalarField, SNodeHostAccess
 from taichi.lang.util import (
     cook_dtype,
+    get_traceback,
     in_python_scope,
     python_scope,
     taichi_scope,
@@ -1655,7 +1656,11 @@ def __init__(self, n, m, dtype, shape):
         self.element_type = _type_factory.get_tensor_type((self.n, self.m), self.dtype)
         # TODO: we should pass in element_type, shape, layout instead.
         self.arr = impl.get_runtime().prog.create_ndarray(
-            cook_dtype(self.element_type), shape, Layout.AOS, zero_fill=True
+            cook_dtype(self.element_type),
+            shape,
+            Layout.AOS,
+            zero_fill=True,
+            dbg_info=ti_python_core.DebugInfo(get_traceback()),
         )

     @property
@@ -1765,7 +1770,11 @@ def __init__(self, n, dtype, shape):
         self.shape = tuple(shape)
         self.element_type = _type_factory.get_tensor_type((n,), self.dtype)
         self.arr = impl.get_runtime().prog.create_ndarray(
-            cook_dtype(self.element_type), shape, Layout.AOS, zero_fill=True
+            cook_dtype(self.element_type),
+            shape,
+            Layout.AOS,
+            zero_fill=True,
+            dbg_info=ti_python_core.DebugInfo(get_traceback()),
         )

     @property

4 changes: 2 additions & 2 deletions python/taichi/lang/mesh.py
@@ -597,7 +597,7 @@ def __init__(self, mesh: MeshInstance, element_type: MeshElementType, entry_expr
                 ast_builder.expr_subscript(
                     attr.ptr,
                     global_entry_expr_group,
-                    impl.get_runtime().get_current_src_info(),
+                    _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()),
                 )
             ),
         )
@@ -612,7 +612,7 @@ def __init__(self, mesh: MeshInstance, element_type: MeshElementType, entry_expr
                 ast_builder.expr_subscript(
                     var,
                     global_entry_expr_group,
-                    impl.get_runtime().get_current_src_info(),
+                    _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()),
                 )
             ),
         )

2 changes: 1 addition & 1 deletion python/taichi/lang/ops.py
@@ -1358,7 +1358,7 @@ def atomic_xor(x, y):

 @writeback_binary
 def assign(a, b):
-    impl.get_runtime().compiling_callable.ast_builder().expr_assign(a.ptr, b.ptr, stack_info())
+    impl.get_runtime().compiling_callable.ast_builder().expr_assign(a.ptr, b.ptr, _ti_core.DebugInfo(stack_info()))
     return a

4 changes: 3 additions & 1 deletion python/taichi/lang/simt/block.py
@@ -72,6 +72,8 @@ def __init__(self, shape, dtype):
             raise ValueError(
                 f"ti.simt.block.shared_array shape must be an integer or a tuple of integers, but got {shape}"
             )
+        if isinstance(dtype, impl.MatrixType):
+            dtype = dtype.tensor_type
         self.dtype = dtype
         self.shared_array_proxy = impl.expr_init_shared_array(self.shape, dtype)

@@ -82,6 +84,6 @@ def subscript(self, *indices):
             ast_builder.expr_subscript(
                 self.shared_array_proxy,
                 make_expr_group(*indices),
-                impl.get_runtime().get_current_src_info(),
+                _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()),
             )
         )

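With the coercion above, a MatrixType such as ti.math.vec3 can be passed as the element type of a shared array; the wrapper converts it to the underlying tensor type before allocation. A usage sketch, assuming a CUDA device and that vector element types are accepted as the change suggests:

    import taichi as ti

    ti.init(arch=ti.cuda)

    @ti.kernel
    def fill():
        ti.loop_config(block_dim=128)
        for i in range(1024):
            # 128 vec3 elements of block-shared memory (assumed to be
            # allowed after this change).
            pad = ti.simt.block.shared_array((128,), ti.math.vec3)
            pad[i % 128] = ti.math.vec3(0.0, 0.0, 0.0)
            ti.simt.block.sync()
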
12 changes: 6 additions & 6 deletions python/taichi/lang/snode.py
@@ -35,7 +35,7 @@ def dense(self, axes, dimensions):
         """
         if isinstance(dimensions, numbers.Number):
             dimensions = [dimensions] * len(axes)
-        return SNode(self.ptr.dense(axes, dimensions, get_traceback()))
+        return SNode(self.ptr.dense(axes, dimensions, _ti_core.DebugInfo(get_traceback())))

     def pointer(self, axes, dimensions):
         """Adds a pointer SNode as a child component of `self`.
@@ -51,7 +51,7 @@ def pointer(self, axes, dimensions):
             raise TaichiRuntimeError("Pointer SNode is not supported on this backend.")
         if isinstance(dimensions, numbers.Number):
             dimensions = [dimensions] * len(axes)
-        return SNode(self.ptr.pointer(axes, dimensions, get_traceback()))
+        return SNode(self.ptr.pointer(axes, dimensions, _ti_core.DebugInfo(get_traceback())))

     @staticmethod
     def _hash(axes, dimensions):
@@ -78,7 +78,7 @@ def dynamic(self, axis, dimension, chunk_size=None):
         assert len(axis) == 1
         if chunk_size is None:
             chunk_size = dimension
-        return SNode(self.ptr.dynamic(axis[0], dimension, chunk_size, get_traceback()))
+        return SNode(self.ptr.dynamic(axis[0], dimension, chunk_size, _ti_core.DebugInfo(get_traceback())))

     def bitmasked(self, axes, dimensions):
         """Adds a bitmasked SNode as a child component of `self`.
@@ -94,7 +94,7 @@ def bitmasked(self, axes, dimensions):
             raise TaichiRuntimeError("Bitmasked SNode is not supported on this backend.")
         if isinstance(dimensions, numbers.Number):
             dimensions = [dimensions] * len(axes)
-        return SNode(self.ptr.bitmasked(axes, dimensions, get_traceback()))
+        return SNode(self.ptr.bitmasked(axes, dimensions, _ti_core.DebugInfo(get_traceback())))

     def quant_array(self, axes, dimensions, max_num_bits):
         """Adds a quant_array SNode as a child component of `self`.
@@ -109,7 +109,7 @@ def quant_array(self, axes, dimensions, max_num_bits):
         """
         if isinstance(dimensions, numbers.Number):
             dimensions = [dimensions] * len(axes)
-        return SNode(self.ptr.quant_array(axes, dimensions, max_num_bits, get_traceback()))
+        return SNode(self.ptr.quant_array(axes, dimensions, max_num_bits, _ti_core.DebugInfo(get_traceback())))

     def place(self, *args, offset=None):
         """Places a list of Taichi fields under the `self` container.
@@ -129,7 +129,7 @@ def place(self, *args, offset=None):
         for arg in args:
             if isinstance(arg, BitpackedFields):
                 bit_struct_type = arg.bit_struct_type_builder.build()
-                bit_struct_snode = self.ptr.bit_struct(bit_struct_type, get_traceback())
+                bit_struct_snode = self.ptr.bit_struct(bit_struct_type, _ti_core.DebugInfo(get_traceback()))
                 for field, id_in_bit_struct in arg.fields:
                     bit_struct_snode.place(field, offset, id_in_bit_struct)
             elif isinstance(arg, Field):

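Each builder method keeps its public signature; only the trailing argument handed to the C++ core changes from a raw traceback string to a DebugInfo wrapping it. For reference, a small layout built through this API (standard Taichi usage, unchanged by this commit):

    import taichi as ti

    ti.init(arch=ti.cpu)

    x = ti.field(ti.f32)
    # Sparse outer pointer, dense inner block; each builder call now records
    # its creation site via DebugInfo internally.
    ti.root.pointer(ti.i, 16).dense(ti.i, 64).place(x)
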
8 changes: 4 additions & 4 deletions taichi/codegen/llvm/codegen_llvm.cpp
@@ -569,7 +569,7 @@ void TaskCodeGenLLVM::visit(BinaryOpStmt *stmt) {
       llvm_val[stmt] =
           call("debug_add_" + stmt->ret_type->to_string(), get_arg(0),
                llvm_val[stmt->lhs], llvm_val[stmt->rhs],
-               builder->CreateGlobalStringPtr(stmt->tb));
+               builder->CreateGlobalStringPtr(stmt->get_tb()));
 #endif
     } else {
       llvm_val[stmt] =
@@ -584,7 +584,7 @@ void TaskCodeGenLLVM::visit(BinaryOpStmt *stmt) {
       llvm_val[stmt] =
           call("debug_sub_" + stmt->ret_type->to_string(), get_arg(0),
                llvm_val[stmt->lhs], llvm_val[stmt->rhs],
-               builder->CreateGlobalStringPtr(stmt->tb));
+               builder->CreateGlobalStringPtr(stmt->get_tb()));
 #endif
     } else {
       llvm_val[stmt] =
@@ -599,7 +599,7 @@ void TaskCodeGenLLVM::visit(BinaryOpStmt *stmt) {
       llvm_val[stmt] =
           call("debug_mul_" + stmt->ret_type->to_string(), get_arg(0),
                llvm_val[stmt->lhs], llvm_val[stmt->rhs],
-               builder->CreateGlobalStringPtr(stmt->tb));
+               builder->CreateGlobalStringPtr(stmt->get_tb()));
 #endif
     } else {
       llvm_val[stmt] =
@@ -646,7 +646,7 @@ void TaskCodeGenLLVM::visit(BinaryOpStmt *stmt) {
       llvm_val[stmt] =
           call("debug_shl_" + stmt->ret_type->to_string(), get_arg(0),
                llvm_val[stmt->lhs], llvm_val[stmt->rhs],
-               builder->CreateGlobalStringPtr(stmt->tb));
+               builder->CreateGlobalStringPtr(stmt->get_tb()));
     } else {
       llvm_val[stmt] =
           builder->CreateShl(llvm_val[stmt->lhs], llvm_val[stmt->rhs]);

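In debug builds the generated kernel calls runtime helpers (debug_add_*, debug_sub_*, debug_mul_*, debug_shl_*) that perform the operation and report overflow together with the statement's traceback, now obtained via get_tb(). The helpers themselves are not in this diff; a rough Python model of what an overflow-checked i32 add reports:

    # Rough model only; the real helpers live in Taichi's LLVM runtime.
    def debug_add_i32(lhs: int, rhs: int, tb: str) -> int:
        exact = lhs + rhs
        wrapped = (exact + 2**31) % 2**32 - 2**31  # wrap to signed 32 bits
        if wrapped != exact:
            print(f"Addition overflow detected\n{tb}")
        return wrapped

    print(debug_add_i32(2**31 - 1, 1, "kernel.py:7"))  # warns, then -2147483648
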
18 changes: 9 additions & 9 deletions taichi/codegen/spirv/spirv_codegen.cpp
@@ -1139,31 +1139,31 @@ class TaskCodegen : public IRVisitor {
     TI_WARN_IF(lhs_value.stype.id != rhs_value.stype.id,
                "${} type {} != ${} type {}\n{}", lhs_name,
                lhs_value.stype.dt->to_string(), rhs_name,
-               rhs_value.stype.dt->to_string(), bin->tb);
+               rhs_value.stype.dt->to_string(), bin->get_tb());

     bool debug = caps_->get(DeviceCapability::spirv_has_non_semantic_info);

     if (debug && op_type == BinaryOpType::add && is_integral(dst_type.dt)) {
       if (is_unsigned(dst_type.dt)) {
-        bin_value = generate_uadd_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_uadd_overflow(lhs_value, rhs_value, bin->get_tb());
       } else {
-        bin_value = generate_sadd_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_sadd_overflow(lhs_value, rhs_value, bin->get_tb());
       }
       bin_value = ir_->cast(dst_type, bin_value);
     } else if (debug && op_type == BinaryOpType::sub &&
               is_integral(dst_type.dt)) {
       if (is_unsigned(dst_type.dt)) {
-        bin_value = generate_usub_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_usub_overflow(lhs_value, rhs_value, bin->get_tb());
       } else {
-        bin_value = generate_ssub_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_ssub_overflow(lhs_value, rhs_value, bin->get_tb());
       }
       bin_value = ir_->cast(dst_type, bin_value);
     } else if (debug && op_type == BinaryOpType::mul &&
                is_integral(dst_type.dt)) {
       if (is_unsigned(dst_type.dt)) {
-        bin_value = generate_umul_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_umul_overflow(lhs_value, rhs_value, bin->get_tb());
       } else {
-        bin_value = generate_smul_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_smul_overflow(lhs_value, rhs_value, bin->get_tb());
       }
       bin_value = ir_->cast(dst_type, bin_value);
     }
@@ ... @@

     else if (debug && op_type == BinaryOpType::bit_shl) {
       if (is_unsigned(dst_type.dt)) {
-        bin_value = generate_ushl_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_ushl_overflow(lhs_value, rhs_value, bin->get_tb());
       } else {
-        bin_value = generate_sshl_overflow(lhs_value, rhs_value, bin->tb);
+        bin_value = generate_sshl_overflow(lhs_value, rhs_value, bin->get_tb());
       }
     }
     BINARY_OP_TO_SPIRV_BITWISE(bit_and, OpBitwiseAnd)

(Diff truncated: the remaining 42 of the 58 changed files are not shown.)
