Accumulation/Distribution Line (A/D)

Overview

The Accumulation/Distribution Line tracks cumulative money flow by multiplying volume by a close location value that measures where the close falls within the period's high-low range. When price closes near the high, most of the volume counts as accumulation; conversely, closes near the low register as distribution. Each period's money flow volume is added to a running total, creating a line that rises when buyers dominate and falls during distribution phases. Divergences between A/D and price reveal whether volume confirms the current trend or signals an impending reversal. Traders watch for the A/D line making higher highs while price stalls, indicating accumulation before a breakout, or making lower lows while price advances, warning of distribution before a decline.
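
The arithmetic is compact enough to show directly. The following is a minimal plain-Rust sketch of the close location value and running total, independent of the vectorta API; the ad_reference name and the zero-range guard are illustrative assumptions, not the library's implementation:

// Close location value (CLV): ((close - low) - (high - close)) / (high - low)
// Money flow volume: CLV * volume, accumulated into a running total
fn ad_reference(high: &[f64], low: &[f64], close: &[f64], volume: &[f64]) -> Vec<f64> {
    let mut running = 0.0;
    high.iter()
        .zip(low)
        .zip(close)
        .zip(volume)
        .map(|(((&h, &l), &c), &v)| {
            let range = h - l;
            // Flat bars (high == low) contribute no money flow in this sketch
            let clv = if range == 0.0 { 0.0 } else { ((c - l) - (h - c)) / range };
            running += clv * v;
            running
        })
        .collect()
}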

Implementation Examples

Get started with A/D in just a few lines:

use vectorta::indicators::ad::{ad, AdInput, AdParams};
use vectorta::utilities::data_loader::Candles;

// Using with OHLCV slices
let high = vec![100.0, 102.0, 101.5, 103.0, 105.0];
let low = vec![98.0, 99.0, 100.0, 101.0, 103.0];
let close = vec![99.5, 101.5, 100.5, 102.5, 104.5];
let volume = vec![10000.0, 12000.0, 11000.0, 13000.0, 15000.0];

let input = AdInput::from_slices(&high, &low, &close, &volume, AdParams::default());
let result = ad(&input)?;

// Using with Candles data structure
// Quick and simple with default parameters
let input = AdInput::with_default_candles(&candles);
let result = ad(&input)?;

// Access the A/D values
for (i, value) in result.values.iter().enumerate() {
    println!("A/D at {}: {}", i, value);
}

API Reference

Input Methods
// From OHLCV slices
AdInput::from_slices(&[f64], &[f64], &[f64], &[f64], AdParams) -> AdInput

// From candles structure
AdInput::from_candles(&Candles, AdParams) -> AdInput

// From candles with default params (no parameters for A/D)
AdInput::with_default_candles(&Candles) -> AdInput

Parameters Structure
pub struct AdParams {}  // No adjustable parameters for A/D

Output Structure
pub struct AdOutput {
    pub values: Vec<f64>, // Cumulative A/D values
}

Error Handling
use vectorta::indicators::ad::AdError;

match ad(&input) {
    Ok(output) => process_results(output.values),
    Err(AdError::CandleFieldError(msg)) =>
        println!("Candle field error: {}", msg),
    Err(AdError::DataLengthMismatch { high_len, low_len, close_len, volume_len }) =>
        println!("Data arrays must have equal length: H:{} L:{} C:{} V:{}",
                 high_len, low_len, close_len, volume_len),
    Err(AdError::NotEnoughData { len }) =>
        println!("Need at least 1 data point, got {}", len),
    Err(e) => println!("A/D error: {}", e)
}

Python Bindings

Basic Usage

Calculate A/D using NumPy arrays:

import numpy as np
from vectorta import ad

# Prepare OHLCV data as NumPy arrays
high = np.array([100.0, 102.0, 101.5, 103.0, 105.0])
low = np.array([98.0, 99.0, 100.0, 101.0, 103.0])
close = np.array([99.5, 101.5, 100.5, 102.5, 104.5])
volume = np.array([10000.0, 12000.0, 11000.0, 13000.0, 15000.0])

# Calculate A/D (no parameters needed)
result = ad(high, low, close, volume)

# Specify kernel for performance optimization
result = ad(high, low, close, volume, kernel="avx2")

# Result is a NumPy array matching input length
print(f"A/D values: {result}")

# Detect divergences
price_trend = close[-1] > close[0]
ad_trend = result[-1] > result[0]
if price_trend != ad_trend:
    print("Divergence detected!")

Streaming Real-time Updates

Process real-time OHLCV updates efficiently:

from vectorta import AdStream

# Initialize streaming A/D calculator
stream = AdStream()

# Track the previous A/D value locally rather than attaching it to the stream object
previous_ad = None
threshold = 50_000.0  # illustrative signal threshold; tune for your instrument

# Process real-time OHLCV updates
for tick in market_data_feed:
    ad_value = stream.update(
        tick.high,
        tick.low,
        tick.close,
        tick.volume
    )

    print(f"Current A/D: {ad_value}")

    # Track changes for trading signals
    if previous_ad is not None:
        change = ad_value - previous_ad
        if abs(change) > threshold:
            if change > 0:
                print(f"Accumulation detected: +{change}")
            else:
                print(f"Distribution detected: {change}")

    previous_ad = ad_value

Batch Processing Multiple Securities

Process multiple securities efficiently in parallel:

import numpy as np
from vectorta import ad_batch

# Prepare OHLCV data for multiple securities
# Each list contains arrays for different securities
highs = [high_stock1, high_stock2, high_stock3]    # List of NumPy arrays
lows = [low_stock1, low_stock2, low_stock3]
closes = [close_stock1, close_stock2, close_stock3]
volumes = [volume_stock1, volume_stock2, volume_stock3]

# Calculate A/D for all securities in parallel
results = ad_batch(
    highs,
    lows,
    closes,
    volumes,
    kernel="auto"  # Auto-select best kernel
)

# Access results
print(f"Values shape: {results['values'].shape}")  # (num_securities, time_steps)
print(f"Rows (securities): {results['rows']}")
print(f"Cols (time steps): {results['cols']}")

# Analyze each security
for i in range(results['rows']):
    ad_values = results['values'][i]

    # Find strongest accumulation
    max_ad = np.max(ad_values)
    max_idx = np.argmax(ad_values)
    print(f"Security {i}: Peak accumulation of {max_ad} at index {max_idx}")

    # Calculate rate of change
    if len(ad_values) > 1:
        daily_change = np.diff(ad_values)
        avg_change = np.mean(daily_change)
        print(f"Security {i}: Average daily A/D change: {avg_change}")

CUDA Acceleration

CUDA support for A/D is currently under development. The API will follow the same pattern as other CUDA-enabled indicators.

# Coming soon: CUDA-accelerated A/D calculations
#
# from vectorta import ad_cuda_batch
# import numpy as np
#
# # Process massive portfolio on GPU
# # Prepare data in time-major format [time_steps, num_assets]
# highs_tm = np.array([...])    # Shape: [T, N]
# lows_tm = np.array([...])
# closes_tm = np.array([...])
# volumes_tm = np.array([...])
#
# # Calculate A/D for entire portfolio on GPU
# results = ad_cuda_batch(
#     highs_tm,
#     lows_tm,
#     closes_tm,
#     volumes_tm,
#     device_id=0
# )
# # Returns: 2D array [time_steps, num_assets] with A/D for each asset
#
# # Zero-copy variant with pre-allocated output (F32 for GPU efficiency)
# out = np.empty((time_steps, num_assets), dtype=np.float32)
# ad_cuda_batch_into(
#     highs_tm.astype(np.float32),
#     lows_tm.astype(np.float32),
#     closes_tm.astype(np.float32),
#     volumes_tm.astype(np.float32),
#     out=out,
#     device_id=0
# )

JavaScript/WASM Bindings

Basic Usage

Calculate A/D in JavaScript/TypeScript:

import { ad_js } from 'vectorta-wasm';

// OHLCV data as Float64Array or regular array
const high = new Float64Array([100.0, 102.0, 101.5, 103.0, 105.0]);
const low = new Float64Array([98.0, 99.0, 100.0, 101.0, 103.0]);
const close = new Float64Array([99.5, 101.5, 100.5, 102.5, 104.5]);
const volume = new Float64Array([10000, 12000, 11000, 13000, 15000]);

// Calculate A/D (no parameters needed)
const result = ad_js(high, low, close, volume);

// Result is a Float64Array
console.log('A/D values:', result);

// TypeScript type definitions
interface AdResult {
  values: Float64Array;
}

// Wrap the call in an async helper so errors can be caught and logged
async function calculateAD(
  high: Float64Array,
  low: Float64Array,
  close: Float64Array,
  volume: Float64Array
): Promise<Float64Array> {
  try {
    return ad_js(high, low, close, volume);
  } catch (error) {
    console.error('A/D calculation failed:', error);
    throw error;
  }
}

Memory-Efficient Operations

Use zero-copy operations for better performance with large datasets:

import { ad_alloc, ad_free, ad_into, memory } from 'vectorta-wasm';

// Prepare your OHLCV data
const high = new Float64Array([/* your data */]);
const low = new Float64Array([/* your data */]);
const close = new Float64Array([/* your data */]);
const volume = new Float64Array([/* your data */]);
const length = high.length;

// Allocate WASM memory for the inputs and the output
// (this sketch assumes ad_alloc can also be used for input buffers;
//  each call returns an offset to a float64 buffer of the given length)
const highPtr = ad_alloc(length);
const lowPtr = ad_alloc(length);
const closePtr = ad_alloc(length);
const volumePtr = ad_alloc(length);
const outputPtr = ad_alloc(length);

// Create views over the allocated regions and copy the input data into WASM memory
const highArray = new Float64Array(memory.buffer, highPtr, length);
const lowArray = new Float64Array(memory.buffer, lowPtr, length);
const closeArray = new Float64Array(memory.buffer, closePtr, length);
const volumeArray = new Float64Array(memory.buffer, volumePtr, length);

highArray.set(high);
lowArray.set(low);
closeArray.set(close);
volumeArray.set(volume);

// Calculate A/D directly into allocated memory (zero-copy)
ad_into(
  highPtr,     // High pointer
  lowPtr,      // Low pointer
  closePtr,    // Close pointer
  volumePtr,   // Volume pointer
  outputPtr,   // Output pointer
  length       // Data length
);

// Read results from WASM memory
const result = new Float64Array(memory.buffer, outputPtr, length);
const adValues = Array.from(result); // Convert to regular array if needed

// Important: Free all allocated memory when done
ad_free(highPtr, length);
ad_free(lowPtr, length);
ad_free(closePtr, length);
ad_free(volumePtr, length);
ad_free(outputPtr, length);

console.log('A/D values:', adValues);

Batch Processing

Process multiple securities efficiently:

import { ad_batch_js, ad_batch_metadata_js } from 'vectorta-wasm';

// Prepare OHLCV data for multiple securities
// Flatten arrays: [security1_data, security2_data, ...]
const numSecurities = 3;
const timeSteps = 100;

const highs_flat = new Float64Array(numSecurities * timeSteps);
const lows_flat = new Float64Array(numSecurities * timeSteps);
const closes_flat = new Float64Array(numSecurities * timeSteps);
const volumes_flat = new Float64Array(numSecurities * timeSteps);

// Fill with your data...
for (let i = 0; i < numSecurities; i++) {
  const offset = i * timeSteps;
  highs_flat.set(securities[i].high, offset);
  lows_flat.set(securities[i].low, offset);
  closes_flat.set(securities[i].close, offset);
  volumes_flat.set(securities[i].volume, offset);
}

// Get metadata about batch dimensions
const metadata = ad_batch_metadata_js(numSecurities, timeSteps);
console.log('Processing ' + metadata[0] + ' securities with ' + metadata[1] + ' time steps');

// Calculate A/D for all securities
const results = ad_batch_js(
  highs_flat,
  lows_flat,
  closes_flat,
  volumes_flat,
  numSecurities
);

// Results is a flat array: [security1_values..., security2_values..., ...]
// Reshape for easier access
const resultMatrix = [];
for (let i = 0; i < numSecurities; i++) {
  const start = i * timeSteps;
  const end = start + timeSteps;
  resultMatrix.push(results.slice(start, end));
}

// Analyze each security
resultMatrix.forEach((adValues, idx) => {
  const lastAD = adValues[adValues.length - 1];
  const firstAD = adValues[0];
  const totalChange = lastAD - firstAD;

  console.log('Security ' + idx + ': Total A/D change: ' + totalChange);

  if (totalChange > 0) {
    console.log('Security ' + idx + ': Net accumulation');
  } else {
    console.log('Security ' + idx + ': Net distribution');
  }
});

Performance Analysis

Across sizes, Rust CPU runs about 2.31× faster than Tulip C in this benchmark.

[Benchmark chart: Rust CPU vs. Tulip C across input sizes]

AMD Ryzen 9 9950X (CPU) | NVIDIA RTX 4090 (GPU) | Benchmarks: 2026-01-05

Related Indicators