A Simple Hands-On with the Disruptor Framework


  • https://github.com/LMAX-Exchange/disruptor/wiki/Introduction
  • https://github.com/LMAX-Exchange/disruptor/wiki/Getting-Started
  • http://lmax-exchange.github.io/disruptor/files/Disruptor-1.0.pdf

Basics

The best way to understand what the Disruptor is, is to compare it to something that is well understood and quite similar in purpose.
For the Disruptor, that reference point is Java's BlockingQueue. Like a queue, the purpose of the Disruptor is to move data (e.g. messages or events) between threads within the same process.
However, the Disruptor provides a few key features that set it apart from a queue. They are:

  • Multicast events to consumers, with a consumer dependency graph (see the sketch after this list).
  • Pre-allocate memory for events.
  • Optionally lock-free.
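
To make the first point concrete, here is a minimal sketch (not from the original article) of multicast plus a consumer dependency graph using the Disruptor DSL. The LongEvent type and the three handlers are hypothetical stand-ins that simply print what they see; the API calls (handleEventsWith, then, publishEvent, shutdown) are the standard Disruptor 3.x DSL.

import com.lmax.disruptor.EventHandler;
import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.util.DaemonThreadFactory;

public class MulticastSketch {

    // A trivial event holding a single long value (hypothetical, for illustration only)
    static class LongEvent {
        long value;
    }

    public static void main(String[] args) {
        Disruptor<LongEvent> disruptor =
                new Disruptor<>(LongEvent::new, 1024, DaemonThreadFactory.INSTANCE);

        // Multicast: journal and replicate each receive every single event.
        EventHandler<LongEvent> journal = (event, sequence, endOfBatch) ->
                System.out.println("journal   " + event.value);
        EventHandler<LongEvent> replicate = (event, sequence, endOfBatch) ->
                System.out.println("replicate " + event.value);
        // Dependency graph: business only sees an event after journal AND replicate are done with it.
        EventHandler<LongEvent> business = (event, sequence, endOfBatch) ->
                System.out.println("business  " + event.value);

        disruptor.handleEventsWith(journal, replicate).then(business);
        disruptor.start();

        disruptor.getRingBuffer().publishEvent((event, sequence) -> event.value = 42L);

        // Waits until all published events have been processed, then halts the handler threads.
        disruptor.shutdown();
    }
}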

Architecture Diagram

(The original post embedded an architecture diagram image here; it is not reproduced.)

Core Concepts

Ring Buffer: the pre-allocated circular array that stores the events passed between threads.

Sequence: a monotonically increasing counter that tracks how far a producer or consumer has progressed through the ring buffer.

Wait Strategy: determines how a consumer waits for new events to become available (blocking, sleeping, yielding, busy-spinning, and so on).

EventHandler: the user-supplied callback that processes events; in Disruptor terms, the consumer.

Hands-On Example

  1. Task class
package morning.cat;

import lombok.Data;
import lombok.ToString;

/**
 * Task model
 */
@Data
@ToString
public class Task {

    private Long taskId;

    private String taskName;

    private String taskContent;
}

  2. Task event class
package morning.cat;

/**
 * Task event (the object that lives in the ring buffer)
 */
public class TaskEvent {

    private Task value;

    public void set(Task task) {
        this.value = task;
    }

    public void clear() {
        value = null;
    }

    @Override
    public String toString() {
        return value.toString();
    }
}

  3. Event factory class
package morning.cat;

import com.lmax.disruptor.EventFactory;

/**
 * Task event factory; the Disruptor uses it to pre-allocate every event slot in the ring buffer.
 */
public class TaskEventFactory implements EventFactory<TaskEvent> {
    @Override
    public TaskEvent newInstance() {
        return new TaskEvent();
    }
}

  4. Consumer EventHandler
package morning.cat.handle;


import com.lmax.disruptor.EventHandler;
import morning.cat.TaskEvent;

/**
 * Event handler (the consumer)
 */
public class TaskEventHandle implements EventHandler<TaskEvent> {

    /**
     * Business processing.
     *
     * @param event      the event published to the ring buffer
     * @param sequence   the sequence of the event being processed
     * @param endOfBatch true if this is the last event in the current batch
     * @throws Exception if processing fails
     */
    @Override
    public void onEvent(TaskEvent event, long sequence, boolean endOfBatch) throws Exception {
        System.out.println("业务处理: " + event);
    }
}

// Cleanup handler

package morning.cat.handle;

import com.lmax.disruptor.EventHandler;
import morning.cat.TaskEvent;

/**
 * When data is passed through the Disruptor, objects can live longer than intended.
 * To avoid this it may be necessary to clear the event once it has been handled.
 * If there is only a single event handler, clearing the value in that same handler is sufficient.
 * If there is a chain of event handlers, a dedicated handler placed at the end of the chain
 * should take care of clearing the object.
 * <p>
 * Clearing objects from the ring buffer.
 */
public class CleanTaskEventHandle implements EventHandler<TaskEvent> {
    @Override
    public void onEvent(TaskEvent event, long sequence, boolean endOfBatch) throws Exception {
        // Failing to call clear here will result in the
        // object associated with the event to live until
        // it is overwritten once the ring buffer has wrapped
        // around to the beginning.
        event.clear();
    }
}
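
The clearing handler is meant to sit at the very end of the handler chain, so an event is only cleared after every other handler has seen it. A minimal wiring sketch, assuming a Disruptor<TaskEvent> instance named disruptor (the same wiring appears, commented out, in the main class below):

// Business handler first, clean-up handler last.
disruptor.handleEventsWith(new TaskEventHandle())
         .then(new CleanTaskEventHandle());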

  5. Producer
package morning.cat.producer;

import com.lmax.disruptor.RingBuffer;
import morning.cat.Task;
import morning.cat.TaskEvent;

/**
 * Publishing using the legacy API (old-style producer)
 */
public class TaskEventProducer {

    private final RingBuffer<TaskEvent> ringBuffer;

    public TaskEventProducer(RingBuffer<TaskEvent> ringBuffer) {
        this.ringBuffer = ringBuffer;
    }


    public void onData(Task bb) {
        long sequence = ringBuffer.next();  // Grab the next sequence
        try {
            TaskEvent event = ringBuffer.get(sequence); // Get the entry in the Disruptor
            // for the sequence
            event.set(bb);  // Fill with data
        } finally {
            ringBuffer.publish(sequence);
        }
    }
}

package morning.cat.producer;

import com.lmax.disruptor.EventTranslatorOneArg;
import com.lmax.disruptor.RingBuffer;
import morning.cat.Task;
import morning.cat.TaskEvent;

/**
 * Publishing using translators (the producer style introduced in 3.x)
 */
public class TaskEventProducerWithTranslator {

    private final RingBuffer<TaskEvent> ringBuffer;

    public TaskEventProducerWithTranslator(RingBuffer<TaskEvent> ringBuffer) {
        this.ringBuffer = ringBuffer;
    }

    private static final EventTranslatorOneArg<TaskEvent, Task> TRANSLATOR =
            new EventTranslatorOneArg<TaskEvent, Task>() {
                @Override
                public void translateTo(TaskEvent event, long sequence, Task bb) {
                    event.set(bb);
                }
            };

    public void onData(Task bb) {
        // The lambda below is equivalent to: ringBuffer.publishEvent(TRANSLATOR, bb);
        ringBuffer.publishEvent((event, sequence) -> event.set(bb));
    }
}

  6. Main class
package morning.cat.main;

import com.lmax.disruptor.BlockingWaitStrategy;
import com.lmax.disruptor.RingBuffer;
import com.lmax.disruptor.YieldingWaitStrategy;
import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.dsl.ProducerType;
import com.lmax.disruptor.util.DaemonThreadFactory;
import morning.cat.Task;
import morning.cat.TaskEvent;
import morning.cat.TaskEventFactory;
import morning.cat.handle.CleanTaskEventHandle;
import morning.cat.handle.TaskEventHandle;
import morning.cat.producer.TaskEventProducer;
import morning.cat.producer.TaskEventProducerWithTranslator;
import morning.cat.utils.DisruptorUtil;


public class TaskMain {

    public static void main(String[] args) throws Exception {
        // The factory for the event
        TaskEventFactory factory = new TaskEventFactory();

        // Specify the size of the ring buffer, must be power of 2.
        int bufferSize = 1024;

        // Construct the Disruptor
        Disruptor<TaskEvent> disruptor = new Disruptor<>(factory, bufferSize, DaemonThreadFactory.INSTANCE);
        // The default wait strategy used by the Disruptor is the BlockingWaitStrategy.
        // Internally, the BlockingWaitStrategy uses a typical lock and condition variable to handle thread wake-up.
        // It is the slowest of the available wait strategies, but it is the most conservative in its CPU usage and will
        // give the most consistent behaviour across the widest variety of deployment options. However, knowledge of
        // the deployed system can allow for additional performance.

        // Construct the Disruptor with a SingleProducerSequencer
        Disruptor<TaskEvent> disruptor2 = new Disruptor<>(
                factory, bufferSize, DaemonThreadFactory.INSTANCE, ProducerType.SINGLE, new YieldingWaitStrategy());
        // SleepingWaitStrategy -> Like the BlockingWaitStrategy, the SleepingWaitStrategy tries to be conservative with
        //   CPU usage by using a simple busy-wait loop, but it calls LockSupport.parkNanos(1) in the middle of the loop.
        //   On a typical Linux system this pauses the thread for around 60 microseconds. The producing thread does not
        //   need to do anything besides incrementing the appropriate counter and does not pay the cost of signalling a
        //   condition variable; however, the mean latency of moving events between producer and consumer threads will
        //   be higher. It works best where low latency is not required but a low impact on the producing thread is
        //   desired. A common use case is asynchronous logging.
        // YieldingWaitStrategy -> One of two wait strategies that can be used in low-latency systems, where you are
        //   willing to burn CPU cycles in exchange for lower latency. It busy-spins waiting for the sequence to
        //   increment to the appropriate value, calling Thread.yield() inside the loop to let other queued threads run.
        //   This is the recommended strategy when you need very high performance and the number of event handler
        //   threads is less than the total number of logical cores, e.g. with hyper-threading enabled.
        // BusySpinWaitStrategy -> The highest-performing wait strategy, but it places the tightest constraints on the
        //   deployment environment. It should only be used when the number of event handler threads is less than the
        //   number of physical cores on the box, e.g. hyper-threading should be disabled.
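        // (Sketch, not in the original article.) Any of the strategies described above can be plugged in the same way
        // the YieldingWaitStrategy was passed to disruptor2, e.g. (add the matching com.lmax.disruptor imports first):
        //   new Disruptor<>(factory, bufferSize, DaemonThreadFactory.INSTANCE, ProducerType.SINGLE, new SleepingWaitStrategy());
        //   new Disruptor<>(factory, bufferSize, DaemonThreadFactory.INSTANCE, ProducerType.SINGLE, new BusySpinWaitStrategy());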

        // Connect the handler
        disruptor.handleEventsWith(new TaskEventHandle())
                // .then(new CleanTaskEventHandle()) // clear the objects from the ring buffer
        ;

        // Start the Disruptor, starts all threads running
        // disruptor.start();

        // DisruptorUtil is a helper class that the article does not show; a possible sketch is given after this class.
        if (!DisruptorUtil.isStarted(disruptor)) {
            disruptor.start();
        }

        // Get the ring buffer from the Disruptor to be used for publishing.
        RingBuffer<TaskEvent> ringBuffer = disruptor.getRingBuffer();

        // Producer
        TaskEventProducer producer = new TaskEventProducer(ringBuffer);
        TaskEventProducerWithTranslator translator = new TaskEventProducerWithTranslator(ringBuffer);

        for (long l = 0; true; l++) {
            Task task = new Task();
            task.setTaskId(System.currentTimeMillis());
            task.setTaskName(Thread.currentThread().getName() + task.getTaskId());
            task.setTaskContent("Hello World, this is disruptor");
            producer.onData(task);

            Thread.sleep(100);

            Task task2 = new Task();
            task2.setTaskId(System.currentTimeMillis());
            task2.setTaskName(Thread.currentThread().getName() + task2.getTaskId());
            task2.setTaskContent("Hello World, this is task2");
            translator.onData(task2);
        }
    }
}
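
The main class above calls a DisruptorUtil helper that the article itself never shows. Below is one possible sketch of it, not taken from the article: the Disruptor class exposes no public "is it started?" query, so this version reads Disruptor's private started field reflectively. That field is an internal detail of the 3.x implementation and may be renamed or removed in other versions; the simpler alternative is to drop the check and call disruptor.start() exactly once.

package morning.cat.utils;

import java.lang.reflect.Field;
import java.util.concurrent.atomic.AtomicBoolean;

import com.lmax.disruptor.dsl.Disruptor;

/**
 * Hypothetical helper (not part of the original article).
 * Disruptor.start() must only be called once, so this checks the private
 * AtomicBoolean "started" field via reflection before starting.
 */
public class DisruptorUtil {

    public static boolean isStarted(Disruptor<?> disruptor) {
        try {
            Field startedField = Disruptor.class.getDeclaredField("started");
            startedField.setAccessible(true);
            return ((AtomicBoolean) startedField.get(disruptor)).get();
        } catch (ReflectiveOperationException e) {
            // Internal field not found in this Disruptor version: assume not started
            // and let Disruptor.start() itself fail if it really was called twice.
            return false;
        }
    }
}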
