Add MultiheadAttention to DirectMLX #600

Open · wants to merge 5 commits into master

185 changes: 185 additions & 0 deletions Libraries/DirectMLX.h
@@ -4291,6 +4291,191 @@ namespace dml

return output;
}

struct MultiHeadAttentionOutputs
{
Expression output;
Optional<Expression> outputPresentKey;
Optional<Expression> outputPresentValue;
};

inline MultiHeadAttentionOutputs MultiHeadAttention(
@fdwr (Contributor) commented on Jun 25, 2024:

    inline MultiheadAttentionOutputs MultiheadAttention(

Multihead is a single word (https://en.wiktionary.org/wiki/multihead / https://www.merriam-webster.com/dictionary/multiheaded), consistent with our enum DML_MULTIHEAD_ATTENTION_OPERATOR_DESC and with PyTorch (https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html). The people using hyphens just don't know that "multi" is a prefix :b.

Optional<Expression> query,
Optional<Expression> key,
Optional<Expression> value,
Optional<Expression> stackedQueryKey,
Optional<Expression> stackedKeyValue,
Optional<Expression> stackedQueryKeyValue,
Optional<Expression> bias,
Optional<Expression> mask,
Optional<Expression> relativePositionBias,
Optional<Expression> pastKey,
Optional<Expression> pastValue,
Optional<Expression> pastSequenceLengths,
float scale,
float maskFilterValue,
uint32_t queryHeadCount,
uint32_t keyValueHeadCount,
DML_MULTIHEAD_ATTENTION_MASK_TYPE maskType,
bool computeOutputPresentKeyValue,
Optional<uint32_t> maxSequenceLength = {})
{
assert(query || stackedQueryKey || stackedQueryKeyValue);

detail::GraphBuilder* builder = nullptr;

if (query)
{
A contributor commented:

(minor 🤷) assert(stackedKeyValue || (key && value)); for assertion consistency with the other branches?
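For illustration, the branch with the suggested assertion would read roughly as follows (a sketch of the reviewer's suggestion, not part of this PR's diff):

    if (query)
    {
        // A query configuration also needs key/value data, either as
        // separate tensors or stacked into one tensor.
        assert(stackedKeyValue || (key && value));
        // ...remaining asserts unchanged...
    }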

assert(!stackedQueryKey);
assert(!stackedQueryKeyValue);
builder = query->Impl()->GetGraphBuilder();
}
else if (stackedQueryKey)
{
assert(!query);
assert(!key);
assert(value);
assert(!stackedKeyValue);
assert(!stackedQueryKeyValue);
builder = stackedQueryKey->Impl()->GetGraphBuilder();
}
else
{
assert(stackedQueryKeyValue);
assert(!query);
assert(!key);
assert(!value);
assert(!stackedQueryKey);
assert(!stackedKeyValue);
builder = stackedQueryKeyValue->Impl()->GetGraphBuilder();
}

TensorDesc queryTensor = query ? query->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc keyTensor = key ? key->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc valueTensor = value ? value->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc stackedQueryKeyTensor = stackedQueryKey ? stackedQueryKey->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc stackedKeyValueTensor = stackedKeyValue ? stackedKeyValue->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc stackedQueryKeyValueTensor = stackedQueryKeyValue ? stackedQueryKeyValue->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc biasTensor = bias ? bias->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc maskTensor = mask ? mask->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc relativePositionBiasTensor = relativePositionBias ? relativePositionBias->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc pastKeyTensor = pastKey ? pastKey->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc pastValueTensor = pastValue ? pastValue->Impl()->GetOutputDesc() : TensorDesc();
TensorDesc pastSequenceLengthsTensor = pastSequenceLengths ? pastSequenceLengths->Impl()->GetOutputDesc() : TensorDesc();

uint32_t batchSize;
uint32_t sequenceLength;
uint32_t headSize;
uint32_t valueHeadSize;
DML_TENSOR_DATA_TYPE dataType;

if (query)
{
assert(queryTensor.sizes.size() >= 3);
batchSize = queryTensor.sizes[queryTensor.sizes.size() - 3];
sequenceLength = queryTensor.sizes[queryTensor.sizes.size() - 2];
headSize = queryTensor.sizes[queryTensor.sizes.size() - 1] / queryHeadCount;
dataType = queryTensor.dataType;

if (value)
{
assert(valueTensor.sizes.size() >= 3);
valueHeadSize = valueTensor.sizes[valueTensor.sizes.size() - 1] / keyValueHeadCount;
}
else if (stackedKeyValue)
{
// stackedKeyValue packs key and value as {batch, kvSequence, 2, kvHeadCount, headSize},
// so the per-head value size is the stacked tensor's last dimension.
assert(stackedKeyValueTensor.sizes.size() >= 5);
valueHeadSize = stackedKeyValueTensor.sizes[stackedKeyValueTensor.sizes.size() - 1];
}
}
else if (stackedQueryKey)
{
assert(stackedQueryKeyTensor.sizes.size() >= 5);
batchSize = stackedQueryKeyTensor.sizes[stackedQueryKeyTensor.sizes.size() - 5];
sequenceLength = stackedQueryKeyTensor.sizes[stackedQueryKeyTensor.sizes.size() - 4];
headSize = stackedQueryKeyTensor.sizes[stackedQueryKeyTensor.sizes.size() - 1];
// value is supplied separately in this configuration (asserted above), so
// derive the per-head value size from the value tensor's hidden dimension.
assert(valueTensor.sizes.size() >= 3);
valueHeadSize = valueTensor.sizes[valueTensor.sizes.size() - 1] / keyValueHeadCount;
dataType = stackedQueryKeyTensor.dataType;
}
else
{
assert(stackedQueryKeyValue);
assert(stackedQueryKeyValueTensor.sizes.size() >= 5);
batchSize = stackedQueryKeyValueTensor.sizes[stackedQueryKeyValueTensor.sizes.size() - 5];
@fdwr (Contributor) commented on Jun 25, 2024, on the line below:

stackedQueryKeyValueTensor.sizes.size() - 5

I worry about callers using DMLX who populate tensors directly from some model description, where DMLX would then access invalid negative indices here because the tensor rank is too small, especially if that model description comes from external data that is not completely under the program's control. We could say it is the caller's responsibility to validate all these sizes up front before calling DMLX, but even DML itself validates tensor sizes before accessing any potentially invalid indices. Can we strengthen these mere asserts, which only fire in debug builds, into throwing std::invalid_argument instead?

e.g.

            DMLX_THROW_IF_NOT(stackedQueryKeyValueTensor.sizes.size() >= 5, std::invalid_argument);
            batchSize = stackedQueryKeyValueTensor.sizes[stackedQueryKeyValueTensor.sizes.size() - 5];
#if __cpp_exceptions
    #if DMLX_USE_WIL
        #define DMLX_THROW_IF_FAILED(_hr) THROW_IF_FAILED(_hr)
        #define DMLX_THROW(_hr) THROW_HR(_hr)
        #define DMLX_THROW_IF_NOT(condition, exceptionType) if (!(condition)) { throw exceptionType(#condition); }
    #else
        #define DMLX_THROW_IF_FAILED(_hr) if (FAILED(_hr)) { throw std::runtime_error(#_hr); }
        #define DMLX_THROW(_hr) throw std::runtime_error(#_hr);
        #define DMLX_THROW_IF_NOT(condition, exceptionType) if (!(condition)) { throw exceptionType(#condition); }
    #endif
#else
    #define DMLX_THROW_IF_FAILED(_hr) if (FAILED(_hr)) { std::abort(); }
    #define DMLX_THROW(_hr) std::abort();
    #define DMLX_THROW_IF_NOT(condition, exceptionType) if (!(condition)) { std::abort(); }
#endif

I'm not proposing we turn every assert into an exception, since DML API validation will validate things too and we don't need to validate doubly in DMLX, but we should at least validate the cases where DMLX itself would otherwise access invalid memory.
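Under that proposal, a caller feeding model metadata from untrusted external data could handle the failure gracefully rather than relying on a debug-only assert. A hypothetical sketch, where ReportModelError stands in for whatever error reporting the host application uses:

    try
    {
        dml::MultiHeadAttentionOutputs outputs = dml::MultiHeadAttention(/*...arguments derived from external model data...*/);
    }
    catch (const std::invalid_argument& e)
    {
        // Reject the malformed model description instead of reading out-of-bounds tensor sizes.
        ReportModelError(e.what());
    }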

sequenceLength = stackedQueryKeyValueTensor.sizes[stackedQueryKeyValueTensor.sizes.size() - 4];
headSize = stackedQueryKeyValueTensor.sizes[stackedQueryKeyValueTensor.sizes.size() - 1];
valueHeadSize = headSize;
dataType = stackedQueryKeyValueTensor.dataType;
}

uint32_t outputHiddenSize = valueHeadSize * queryHeadCount;

TensorDesc::Dimensions outputSizes({batchSize, sequenceLength, outputHiddenSize});
TensorDesc outputTensor = TensorDesc(dataType, outputSizes, builder->GetTensorPolicy());

TensorDesc outputPresentKeyTensor;
TensorDesc outputPresentValueTensor;
if (computeOutputPresentKeyValue)
{
assert(maxSequenceLength);

TensorDesc::Dimensions outputPresentKeySizes({batchSize, keyValueHeadCount, *maxSequenceLength, headSize});
outputPresentKeyTensor = TensorDesc(dataType, outputPresentKeySizes, builder->GetTensorPolicy());

TensorDesc::Dimensions outputPresentValueSizes({batchSize, keyValueHeadCount, *maxSequenceLength, valueHeadSize});
outputPresentValueTensor = TensorDesc(dataType, outputPresentValueSizes, builder->GetTensorPolicy());
}

DML_MULTIHEAD_ATTENTION1_OPERATOR_DESC desc = {};
desc.QueryTensor = query ? queryTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.KeyTensor = key ? keyTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.ValueTensor = value ? valueTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.StackedQueryKeyTensor = stackedQueryKey ? stackedQueryKeyTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.StackedKeyValueTensor = stackedKeyValue ? stackedKeyValueTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.StackedQueryKeyValueTensor = stackedQueryKeyValue ? stackedQueryKeyValueTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.BiasTensor = bias ? biasTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.MaskTensor = mask ? maskTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.RelativePositionBiasTensor = relativePositionBias ? relativePositionBiasTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.PastKeyTensor = pastKey ? pastKeyTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.PastValueTensor = pastValue ? pastValueTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.PastSequenceLengthsTensor = pastSequenceLengths ? pastSequenceLengthsTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.OutputTensor = outputTensor.AsPtr<DML_TENSOR_DESC>();
desc.OutputPresentKeyTensor = computeOutputPresentKeyValue ? outputPresentKeyTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.OutputPresentValueTensor = computeOutputPresentKeyValue ? outputPresentValueTensor.AsPtr<DML_TENSOR_DESC>() : nullptr;
desc.Scale = scale;
desc.MaskFilterValue = maskFilterValue;
desc.QueryHeadCount = queryHeadCount;
desc.KeyValueHeadCount = keyValueHeadCount;
desc.MaskType = maskType;

detail::NodeOutput* const inputs[] = {
query ? query->Impl() : nullptr,
key ? key->Impl() : nullptr,
value ? value->Impl() : nullptr,
stackedQueryKey ? stackedQueryKey->Impl() : nullptr,
stackedKeyValue ? stackedKeyValue->Impl() : nullptr,
stackedQueryKeyValue ? stackedQueryKeyValue->Impl() : nullptr,
bias ? bias->Impl() : nullptr,
mask ? mask->Impl() : nullptr,
relativePositionBias ? relativePositionBias->Impl() : nullptr,
pastKey ? pastKey->Impl() : nullptr,
pastValue ? pastValue->Impl() : nullptr,
pastSequenceLengths ? pastSequenceLengths->Impl() : nullptr,
};
detail::NodeID node = builder->CreateOperatorNode(static_cast<DML_OPERATOR_TYPE>(DML_OPERATOR_MULTIHEAD_ATTENTION1), &desc, inputs);

MultiHeadAttentionOutputs outputs {};

outputs.output = builder->CreateNodeOutput(node, 0, std::move(outputTensor));

if (computeOutputPresentKeyValue)
{
outputs.outputPresentKey = builder->CreateNodeOutput(node, 1, std::move(outputPresentKeyTensor));
outputs.outputPresentValue = builder->CreateNodeOutput(node, 2, std::move(outputPresentValueTensor));
}

return outputs;
}
#endif
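
For context, here is a minimal usage sketch of the new helper (illustrative only, not part of the diff; dmlDevice is an assumed pre-existing ComPtr<IDMLDevice>, and the shapes are arbitrary):

    // Self-attention over a {batch=1, sequence=128, hiddenSize=512} input,
    // using 8 heads of size 64, no mask, and no past key/value cache.
    dml::Graph graph(dmlDevice.Get());

    dml::TensorDesc::Dimensions inputSizes = { 1, 128, 512 };
    dml::Expression query = dml::InputTensor(graph, 0, dml::TensorDesc(DML_TENSOR_DATA_TYPE_FLOAT16, inputSizes));
    dml::Expression key   = dml::InputTensor(graph, 1, dml::TensorDesc(DML_TENSOR_DATA_TYPE_FLOAT16, inputSizes));
    dml::Expression value = dml::InputTensor(graph, 2, dml::TensorDesc(DML_TENSOR_DATA_TYPE_FLOAT16, inputSizes));

    dml::MultiHeadAttentionOutputs mha = dml::MultiHeadAttention(
        query, key, value,
        /*stackedQueryKey*/ {}, /*stackedKeyValue*/ {}, /*stackedQueryKeyValue*/ {},
        /*bias*/ {}, /*mask*/ {}, /*relativePositionBias*/ {},
        /*pastKey*/ {}, /*pastValue*/ {}, /*pastSequenceLengths*/ {},
        /*scale*/ 0.125f, // 1 / sqrt(headSize) with headSize = 64
        /*maskFilterValue*/ -10000.0f,
        /*queryHeadCount*/ 8,
        /*keyValueHeadCount*/ 8,
        DML_MULTIHEAD_ATTENTION_MASK_TYPE_NONE,
        /*computeOutputPresentKeyValue*/ false);

    // Compile the graph; mha.output has shape {1, 128, 512}.
    dml::Expression outputs[] = { mha.output };
    Microsoft::WRL::ComPtr<IDMLCompiledOperator> op = graph.Compile(DML_EXECUTION_FLAG_NONE, outputs);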

// Reinterprets the memory of a tensor with a different type and dimensions (analogously to using