19 changes: 19 additions & 0 deletions include/infinicore/ops/flipud.hpp
@@ -0,0 +1,19 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

class Flipud {
public:
// Schema signature: (Output, Input)
using schema = void (*)(Tensor, Tensor);

static void execute(Tensor output, Tensor input);
static common::OpDispatcher<schema> &dispatcher();
};
Tensor flipud(Tensor input);
void flipud_(Tensor output, Tensor input);

} // namespace infinicore::op
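The header above only fixes signatures, so here is a minimal usage sketch of the functional pair it declares. It assumes an include root of `include/`, Tensor values obtained elsewhere (tensor construction is not part of this diff), and NumPy/PyTorch `flipud` semantics, i.e. reversal along the first dimension.

```cpp
#include "infinicore/ops/flipud.hpp"

using namespace infinicore;

// Out-of-place form: allocates and returns the flipped tensor.
Tensor flip_rows(Tensor x) {
    return op::flipud(x); // reverse along dim 0 (up/down), NumPy-style flipud assumed
}

// Explicit-output form: writes into a caller-provided tensor,
// which presumably must match x's shape and dtype.
void flip_rows_into(Tensor out, Tensor x) {
    op::flipud_(out, x);
}
```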
68 changes: 68 additions & 0 deletions include/infinicore/ops/float_power.hpp
@@ -0,0 +1,68 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

class FloatPower {
public:
// ==========================================================
// Dispatcher Schemas
// ==========================================================

// Output = Input ^ Scalar (the scalar exponent is always passed as a double)
using schema_scalar = void (*)(Tensor output,
Tensor input,
double exponent);

// Output = Input ^ Tensor
using schema_tensor = void (*)(Tensor output,
Tensor input,
Tensor exponent);

// ==========================================================
// Execute Entry Points (called by functional interface)
// ==========================================================

static void execute(Tensor output,
Tensor input,
double exponent);

static void execute(Tensor output,
Tensor input,
Tensor exponent);

// ==========================================================
// Dispatchers
// ==========================================================

static common::OpDispatcher<schema_scalar>& dispatcher_scalar();
static common::OpDispatcher<schema_tensor>& dispatcher_tensor();
};

// =======================================================================
// Functional Interface (Python-visible semantics)
// =======================================================================

// -------------------------------
// 1. Scalar Exponent
// -------------------------------

// Out-of-place: the result is always float64
Tensor float_power(Tensor input, double exponent);

// in-place
void float_power_(Tensor output, Tensor input, double exponent);

// -------------------------------
// 2. Tensor Exponent
// -------------------------------

// Out-of-place: the result is always float64
Tensor float_power(Tensor input, Tensor exponent);

// in-place
void float_power_(Tensor output, Tensor input, Tensor exponent);

} // namespace infinicore::op
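A hedged sketch of how the two overloads might be called, under the same assumptions as above (include root, pre-existing Tensors). Per the comments in the header, the out-of-place forms always yield a float64 result; broadcasting behaviour for the tensor-exponent form is not specified here.

```cpp
#include "infinicore/ops/float_power.hpp"

using namespace infinicore;

// Scalar exponent: every element of x is raised to 2.0; the result is float64.
Tensor square_all(Tensor x) {
    return op::float_power(x, 2.0);
}

// Tensor exponent: element-wise x[i] ** e[i]; whether and how shapes
// broadcast is not spelled out by this header.
Tensor pow_elementwise(Tensor x, Tensor e) {
    return op::float_power(x, e);
}

// In-place variants write into a caller-provided output instead of allocating.
void square_into(Tensor out, Tensor x) {
    op::float_power_(out, x, 2.0);
}
```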
16 changes: 16 additions & 0 deletions include/infinicore/ops/floor_divide.hpp
@@ -0,0 +1,16 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {
class FloorDivide {
public:
using schema = void (*)(Tensor, Tensor, Tensor);
static void execute(Tensor c, Tensor a, Tensor b);
static common::OpDispatcher<schema> &dispatcher();
};

Tensor floor_divide(Tensor a, Tensor b);
void floor_divide_(Tensor c, Tensor a, Tensor b);
} // namespace infinicore::op
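The same out-of-place/explicit-output pairing applies here. Floor semantics (rounding toward negative infinity, as in Python's `//`) are assumed, since the header only declares the signatures.

```cpp
#include "infinicore/ops/floor_divide.hpp"

using namespace infinicore;

// Element-wise floor division a // b.
Tensor floor_div(Tensor a, Tensor b) {
    return op::floor_divide(a, b);
}

// Explicit-output form writing the same result into c.
void floor_div_into(Tensor c, Tensor a, Tensor b) {
    op::floor_divide_(c, a, b);
}
```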
19 changes: 19 additions & 0 deletions include/infinicore/ops/multi_margin_loss.hpp
@@ -0,0 +1,19 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

class MultiMarginLoss {
public:
using schema = void (*)(Tensor, Tensor, Tensor, Tensor, int64_t, float, int64_t);

static void execute(Tensor output, Tensor input, Tensor target, Tensor weight, int64_t p, float margin, int64_t reduction);
static common::OpDispatcher<schema> &dispatcher();
};

Tensor multi_margin_loss(Tensor input, Tensor target, Tensor weight = {}, int64_t p = 1, float margin = 1.0f, int64_t reduction = 1);
void multi_margin_loss_(Tensor output, Tensor input, Tensor target, Tensor weight, int64_t p, float margin, int64_t reduction);

} // namespace infinicore::op
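A sketch of the defaulted and fully specified calls. The numeric reduction codes are not documented in this header; the mapping none/mean/sum → 0/1/2 is assumed from the Python triplet_margin_loss wrapper added elsewhere in this PR, which would make the default `reduction = 1` a mean reduction.

```cpp
#include "infinicore/ops/multi_margin_loss.hpp"

using namespace infinicore;

// Defaults from the declaration: no class weights, p = 1, margin = 1.0,
// reduction = 1 (assumed to mean "mean").
Tensor loss_default(Tensor input, Tensor target) {
    return op::multi_margin_loss(input, target);
}

// Fully specified: per-class weights, p = 2, a smaller margin, sum reduction.
Tensor loss_weighted(Tensor input, Tensor target, Tensor weight) {
    return op::multi_margin_loss(input, target, weight,
                                 /*p=*/2, /*margin=*/0.5f, /*reduction=*/2);
}
```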
21 changes: 21 additions & 0 deletions include/infinicore/ops/scatter.hpp
@@ -0,0 +1,21 @@
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

class Scatter {
public:
using schema = void (*)(Tensor, Tensor, int64_t, Tensor, Tensor, int64_t);

static void execute(Tensor output, Tensor input, int64_t dim, Tensor index, Tensor src, int64_t reduction);
static common::OpDispatcher<schema> &dispatcher();
};

Tensor scatter(Tensor input, int64_t dim, Tensor index, Tensor src, int64_t reduction = 0);

// In-place / explicit-output interface
void scatter_(Tensor output, Tensor input, int64_t dim, Tensor index, Tensor src, int64_t reduction);

} // namespace infinicore::op
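A sketch of the scatter interface, assuming PyTorch-style `scatter` semantics (for a 2-D tensor and dim 0: `out[index[i][j]][j] = src[i][j]`) and taking `reduction = 0` to mean plain assignment; neither is stated in the header itself.

```cpp
#include "infinicore/ops/scatter.hpp"

using namespace infinicore;

// Out-of-place: returns a copy of `input` with `src` values written at the
// positions selected by `index` along `dim`.
Tensor scatter_rows(Tensor input, Tensor index, Tensor src) {
    return op::scatter(input, /*dim=*/0, index, src); // default reduction = 0
}

// Explicit-output form for a preallocated result tensor.
void scatter_rows_into(Tensor output, Tensor input, Tensor index, Tensor src) {
    op::scatter_(output, input, /*dim=*/0, index, src, /*reduction=*/0);
}
```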
6 changes: 6 additions & 0 deletions include/infiniop.h
@@ -30,6 +30,12 @@
#include "infiniop/ops/sub.h"
#include "infiniop/ops/swiglu.h"
#include "infiniop/ops/tanh.h"
#include "infiniop/ops/take.h"
#include "infiniop/ops/floor_divide.h"
#include "infiniop/ops/float_power.h"
#include "infiniop/ops/flipud.h"
#include "infiniop/ops/scatter.h"
#include "infiniop/ops/triplet_margin_loss.hpp"
#include "infiniop/ops/topkrouter.h"
#include "infiniop/ops/topksoftmax.h"
#include "infiniop/ops/zeros.h"
27 changes: 27 additions & 0 deletions include/infiniop/ops/flipud.h
@@ -0,0 +1,27 @@
#ifndef __INFINIOP_FLIPUD_API_H__
#define __INFINIOP_FLIPUD_API_H__

#include "../operator_descriptor.h"

typedef struct InfiniopDescriptor *infiniopFlipudDescriptor_t;

__C __export infiniStatus_t infiniopCreateFlipudDescriptor(infiniopHandle_t handle,
infiniopFlipudDescriptor_t *desc_ptr,
infiniopTensorDescriptor_t output,
infiniopTensorDescriptor_t input);

// Query the required workspace size
__C __export infiniStatus_t infiniopGetFlipudWorkspaceSize(infiniopFlipudDescriptor_t desc, size_t *size);

// Run the Flipud operator
__C __export infiniStatus_t infiniopFlipud(infiniopFlipudDescriptor_t desc,
void *workspace,
size_t workspace_size,
void *output,
const void *input,
void *stream);

// Destroy the descriptor
__C __export infiniStatus_t infiniopDestroyFlipudDescriptor(infiniopFlipudDescriptor_t desc);

#endif // __INFINIOP_FLIPUD_API_H__
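All of the new C entry points in this PR share the same descriptor lifecycle (create → query workspace → run → destroy). Below is a minimal sketch of that lifecycle for Flipud; the handle, tensor descriptors, device buffers, and stream are assumed to come from existing infiniop/runtime facilities that are not part of this diff, and status checks are collapsed into a comment.

```cpp
#include <infiniop.h>

// `alloc_ws` / `free_ws` are hypothetical caller-supplied device allocation
// hooks; they stand in for whatever allocator the surrounding runtime uses.
infiniStatus_t run_flipud(infiniopHandle_t handle,
                          infiniopTensorDescriptor_t out_desc,
                          infiniopTensorDescriptor_t in_desc,
                          void *out_data,
                          const void *in_data,
                          void *(*alloc_ws)(size_t),
                          void (*free_ws)(void *),
                          void *stream) {
    infiniopFlipudDescriptor_t desc = nullptr;
    infiniStatus_t status = infiniopCreateFlipudDescriptor(handle, &desc, out_desc, in_desc);
    // Every call below returns infiniStatus_t; real code should bail out on failure.

    size_t workspace_size = 0;
    status = infiniopGetFlipudWorkspaceSize(desc, &workspace_size);

    void *workspace = workspace_size ? alloc_ws(workspace_size) : nullptr;
    status = infiniopFlipud(desc, workspace, workspace_size, out_data, in_data, stream);

    if (workspace) {
        free_ws(workspace);
    }
    infiniopDestroyFlipudDescriptor(desc);
    return status;
}
```

The FloatPower, FloorDivide, MultiMarginLoss, and Scatter descriptors below follow the same pattern, differing only in their create-time arguments and the buffers passed to the execute call.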
27 changes: 27 additions & 0 deletions include/infiniop/ops/float_power.h
@@ -0,0 +1,27 @@
#ifndef __INFINIOP_FLOAT_POWER_API_H__
#define __INFINIOP_FLOAT_POWER_API_H__

#include "../operator_descriptor.h"

typedef struct InfiniopDescriptor *infiniopFloatPowerDescriptor_t;

__C __export infiniStatus_t infiniopCreateFloatPowerDescriptor(infiniopHandle_t handle,
infiniopFloatPowerDescriptor_t *desc_ptr,
infiniopTensorDescriptor_t y,
infiniopTensorDescriptor_t x,
infiniopTensorDescriptor_t exponent,
float scalar_exponent);

__C __export infiniStatus_t infiniopGetFloatPowerWorkspaceSize(infiniopFloatPowerDescriptor_t desc, size_t *size);

__C __export infiniStatus_t infiniopFloatPower(infiniopFloatPowerDescriptor_t desc,
void *workspace,
size_t workspace_size,
void *y,
const void *x,
const void *exponent,
void *stream);

__C __export infiniStatus_t infiniopDestroyFloatPowerDescriptor(infiniopFloatPowerDescriptor_t desc);

#endif
26 changes: 26 additions & 0 deletions include/infiniop/ops/floor_divide.h
@@ -0,0 +1,26 @@
#ifndef __INFINIOP_FLOOR_DIVIDE_API_H__
#define __INFINIOP_FLOOR_DIVIDE_API_H__

#include "../operator_descriptor.h"

typedef struct InfiniopDescriptor *infiniopFloorDivideDescriptor_t;

__C __export infiniStatus_t infiniopCreateFloorDivideDescriptor(infiniopHandle_t handle,
infiniopFloorDivideDescriptor_t *desc_ptr,
infiniopTensorDescriptor_t c,
infiniopTensorDescriptor_t a,
infiniopTensorDescriptor_t b);

__C __export infiniStatus_t infiniopGetFloorDivideWorkspaceSize(infiniopFloorDivideDescriptor_t desc, size_t *size);

__C __export infiniStatus_t infiniopFloorDivide(infiniopFloorDivideDescriptor_t desc,
void *workspace,
size_t workspace_size,
void *c,
const void *a,
const void *b,
void *stream);

__C __export infiniStatus_t infiniopDestroyFloorDivideDescriptor(infiniopFloorDivideDescriptor_t desc);

#endif
30 changes: 30 additions & 0 deletions include/infiniop/ops/multi_margin_loss.h
@@ -0,0 +1,30 @@
#ifndef __INFINIOP_MULTI_MARGIN_LOSS_API_H__
#define __INFINIOP_MULTI_MARGIN_LOSS_API_H__

#include "../operator_descriptor.h"

typedef struct InfiniopDescriptor *infiniopMultiMarginLossDescriptor_t;
__C __export infiniStatus_t infiniopCreateMultiMarginLossDescriptor(infiniopHandle_t handle,
infiniopMultiMarginLossDescriptor_t *desc_ptr,
infiniopTensorDescriptor_t output,
infiniopTensorDescriptor_t input,
infiniopTensorDescriptor_t target,
infiniopTensorDescriptor_t weight,
int p,
float margin,
int reduction);

__C __export infiniStatus_t infiniopGetMultiMarginLossWorkspaceSize(infiniopMultiMarginLossDescriptor_t desc, size_t *size);

__C __export infiniStatus_t infiniopMultiMarginLoss(infiniopMultiMarginLossDescriptor_t desc,
void *workspace,
size_t workspace_size,
void *output,
const void *input,
const void *target,
const void *weight,
void *stream);

__C __export infiniStatus_t infiniopDestroyMultiMarginLossDescriptor(infiniopMultiMarginLossDescriptor_t desc);

#endif // __INFINIOP_MULTI_MARGIN_LOSS_API_H__
30 changes: 30 additions & 0 deletions include/infiniop/ops/scatter.h
@@ -0,0 +1,30 @@
#ifndef __INFINIOP_SCATTER_API_H__
#define __INFINIOP_SCATTER_API_H__

#include "../operator_descriptor.h"

typedef struct InfiniopDescriptor *infiniopScatterDescriptor_t;

__C __export infiniStatus_t infiniopCreateScatterDescriptor(infiniopHandle_t handle,
infiniopScatterDescriptor_t *desc_ptr,
infiniopTensorDescriptor_t output,
infiniopTensorDescriptor_t input,
infiniopTensorDescriptor_t indices,
infiniopTensorDescriptor_t updates,
int axis,
int reduction);

__C __export infiniStatus_t infiniopGetScatterWorkspaceSize(infiniopScatterDescriptor_t desc, size_t *size);

__C __export infiniStatus_t infiniopScatter(infiniopScatterDescriptor_t desc,
void *workspace,
size_t workspace_size,
void *output,
const void *input,
const void *indices,
const void *updates,
void *stream);

__C __export infiniStatus_t infiniopDestroyScatterDescriptor(infiniopScatterDescriptor_t desc);

#endif // __INFINIOP_SCATTER_API_H__
3 changes: 2 additions & 1 deletion python/infinicore/nn/functional/__init__.py
@@ -6,7 +6,7 @@
from .rope import RopeAlgo, rope
from .silu import silu
from .swiglu import swiglu

from .triplet_margin_loss import triplet_margin_loss
__all__ = [
"causal_softmax",
"random_sample",
@@ -17,4 +17,5 @@
"embedding",
"rope",
"RopeAlgo",
"triplet_margin_loss",
]
63 changes: 63 additions & 0 deletions python/infinicore/nn/functional/triplet_margin_loss.py
@@ -0,0 +1,63 @@
from typing import Optional
from infinicore.lib import _infinicore
from infinicore.tensor import Tensor

_REDUCTION_MODES = {
"none": 0,
"mean": 1,
"sum": 2,
}

def triplet_margin_loss(
anchor: Tensor,
positive: Tensor,
negative: Tensor,
margin: float = 1.0,
p: float = 2,
eps: float = 1e-6,
swap: bool = False,
reduction: str = "mean",
*,
out: Optional[Tensor] = None
) -> Tensor:
r"""Creates a criterion that measures the triplet loss given an input
tensors x1, x2, x3 and a margin with a value greater than 0.
"""

if not anchor.is_contiguous():
anchor = anchor.contiguous()
if not positive.is_contiguous():
positive = positive.contiguous()
if not negative.is_contiguous():
negative = negative.contiguous()

if reduction not in _REDUCTION_MODES:
raise ValueError(f"{reduction} is not a valid value for reduction")
reduction_val = _REDUCTION_MODES[reduction]

if out is not None:
_infinicore.triplet_margin_loss_(
out._underlying,
anchor._underlying,
positive._underlying,
negative._underlying,
margin,
int(p),
eps,
swap,
reduction_val
)
return out

return Tensor(
_infinicore.triplet_margin_loss(
anchor._underlying,
positive._underlying,
negative._underlying,
margin,
int(p),
eps,
swap,
reduction_val
)
)