Samples#

The following sections provide the code samples included in the pylon SDK for .NET languages (C# and VB).

Device Removal Handling#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample program demonstrates how to be informed about the removal of a camera device.
    It also shows how to reconnect to a removed device.

    Attention:
    If you build this sample and run it under a debugger using a GigE camera device, pylon will set the heartbeat
    timeout to 5 minutes. This is done to allow debugging and single stepping of the code without
    the camera thinking we're hung because we don't send any heartbeats.
    Accordingly, it would take 5 minutes for the application to notice the disconnection of a GigE device.
*/

using System;
using System.Diagnostics;
using Basler.Pylon;

namespace DeviceRemovalHandling
{
    class DeviceRemovalHandling
    {
        // Event handler for connection loss. It is shown here for demonstration purposes only.
        // Note: This event is always called on a separate thread.
        static void OnConnectionLost( Object sender, EventArgs e )
        {
            // For demonstration purposes, print a message.
            Console.WriteLine( "OnConnectionLost has been called." );
        }

        internal static void Main()
        {
            // Time to wait for the user to disconnect the camera device.
            const int cTimeOutMs = 60000;

            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    // For demonstration purposes, only add an event handler for connection loss.
                    camera.ConnectionLost += OnConnectionLost;

                    // Open the connection to the camera device.
                    camera.Open();

                    ///////////////// Don't single step beyond this line when using GigE cameras (see comments above) ///////////////////////////////
                    // Before testing the callbacks, we manually set the heartbeat timeout to a short value when using GigE cameras.
                    // For debug versions, the heartbeat timeout has been set to 5 minutes, so it would take up to 5 minutes
                    // until device removal is detected.
                    camera.Parameters[PLTransportLayer.HeartbeatTimeout].TrySetValue( 1000, IntegerValueCorrection.Nearest );  // 1000 ms timeout

                    // Start the grabbing.
                    camera.StreamGrabber.Start();

                    // Start the timeout timer.
                    Console.WriteLine( "Please disconnect the device. (Timeout {0}s)", cTimeOutMs / 1000.0 );
                    Stopwatch stopWatch = new Stopwatch();
                    stopWatch.Start();

                    // Grab and display images until timeout.
                    while (camera.StreamGrabber.IsGrabbing && stopWatch.ElapsedMilliseconds < cTimeOutMs)
                    {
                        try
                        {
                            // Wait for an image and then retrieve it. A timeout of 10000 ms is used.
                            IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(10000, TimeoutHandling.ThrowException);
                            using (grabResult)
                            {
                                // Image grabbed successfully?
                                if (grabResult.GrabSucceeded)
                                {
                                    // Display the grabbed image.
                                    ImageWindow.DisplayImage( 0, grabResult );
                                }
                            }
                        }
                        catch (Exception)
                        {
                            // An exception occurred. Is it because the camera device has been physically removed?

                            // Known issue: Wait until the system safely detects a possible removal.
                            System.Threading.Thread.Sleep( 1000 );

                            if (!camera.IsConnected)
                            {
                                // Yes, the camera device has been physically removed.
                                Console.WriteLine( "The camera device has been removed. Please reconnect. (Timeout {0}s)", cTimeOutMs / 1000.0 );

                                // Close the camera object to close underlying resources used for the previous connection.
                                camera.Close();

                                // Try to re-establish a connection to the camera device until timeout.
                                // Reopening the camera triggers the Configuration.AcquireContinuous handler registered above.
                                // Therefore, the camera is parameterized correctly again.
                                camera.Open( cTimeOutMs, TimeoutHandling.ThrowException );

                                // Due to unplugging the camera, settings have changed, e.g. the heartbeat timeout value for GigE cameras.
                                // After the camera has been reconnected, all settings must be restored. This can be done in the CameraOpened
                                // event as shown for the Configuration.AcquireContinuous.
                                camera.Parameters[PLTransportLayer.HeartbeatTimeout].TrySetValue( 1000, IntegerValueCorrection.Nearest );

                                // Restart grabbing.
                                camera.StreamGrabber.Start();

                                // Restart the timeout timer.
                                Console.WriteLine( "Camera reconnected. You may disconnect the camera device again (Timeout {0}s)", cTimeOutMs / 1000.0 );
                                stopWatch.Restart();
                            }
                            else
                            {
                                throw;
                            }
                        }
                    }
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
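
The comments in the sample note that all relevant settings have to be restored after the camera device has been reconnected and that this can be done in the CameraOpened event. The following is a minimal sketch of such a handler; it assumes that the sender of the CameraOpened event is the Camera instance and reuses the heartbeat timeout value from the sample.

// Sketch: restore application-specific settings whenever the camera is (re)opened.
// Assumption: the sender of the CameraOpened event is the Camera instance.
static void OnCameraOpened( Object sender, EventArgs e )
{
    Camera camera = sender as Camera;
    if (camera == null)
    {
        return;
    }

    // Re-apply the short heartbeat timeout used in this sample so that the removal
    // of a GigE camera device is detected quickly after every (re)connect.
    camera.Parameters[PLTransportLayer.HeartbeatTimeout].TrySetValue( 1000, IntegerValueCorrection.Nearest );
}

// Registered in Main() before camera.Open(), in addition to Configuration.AcquireContinuous:
// camera.CameraOpened += OnCameraOpened;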

Grab#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/ 

   This sample illustrates how to grab and process images asynchronously.
   This means that while the application is processing a buffer,
   the acquisition of the next buffer is done in parallel.
   The sample uses a pool of buffers. The buffers are automatically allocated. Once a buffer is filled
   and ready for processing, the buffer is retrieved from the stream grabber as part of a grab
   result. The grab result is processed and the buffer is passed back to the stream grabber by
   disposing the grab result. The buffer is reused and refilled.
   A buffer retrieved from the stream grabber as a grab result is not overwritten in the background
   as long as the grab result is not disposed.
*/

using System;
using Basler.Pylon;

namespace Grab
{
    class Grab
    {
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    // Open the connection to the camera device.
                    camera.Open();

                    // The parameter MaxNumBuffer can be used to control the number of buffers
                    // allocated for grabbing. The default value of this parameter is 10.
                    camera.Parameters[PLCameraInstance.MaxNumBuffer].SetValue( 5 );

                    // Start grabbing.
                    camera.StreamGrabber.Start();

                    // Grab a number of images.
                    for (int i = 0; i < 10; ++i)
                    {
                        // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                        IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                        using (grabResult)
                        {
                            // Image grabbed successfully?
                            if (grabResult.GrabSucceeded)
                            {
                                // Access the image data.
                                Console.WriteLine( "SizeX: {0}", grabResult.Width );
                                Console.WriteLine( "SizeY: {0}", grabResult.Height );
                                byte[] buffer = grabResult.PixelData as byte[];
                                Console.WriteLine( "Gray value of first pixel: {0}", buffer[0] );
                                Console.WriteLine( "" );

                                // Display the grabbed image.
                                ImageWindow.DisplayImage( 0, grabResult );
                            }
                            else
                            {
                                Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                            }
                        }
                    }

                    // Stop grabbing.
                    camera.StreamGrabber.Stop();

                    // Close the connection to the camera device.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
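
The constructor used above selects the first camera device found. If a specific device is required, the devices can be enumerated first and a matching ICameraInfo can be passed to the Camera constructor, as done in the Grab Using Action Command sample below. The following is a minimal sketch; it assumes that CameraFinder.Enumerate() without arguments lists all devices, uses a placeholder serial number "12345678", and requires using System.Collections.Generic;.

// Sketch: select a specific camera device by its serial number.
// "12345678" is a placeholder; replace it with the serial number of your device.
List<ICameraInfo> allDevices = CameraFinder.Enumerate();
foreach (ICameraInfo deviceInfo in allDevices)
{
    if (deviceInfo.GetValueOrDefault( CameraInfoKey.SerialNumber, "" ) == "12345678")
    {
        using (Camera camera = new Camera( deviceInfo ))
        {
            camera.Open();
            // ... grab images as shown in the sample above ...
            camera.Close();
        }
        break;
    }
}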

VB

'This sample illustrates how to grab and process images asynchronously.
'This means that while the application is processing a buffer,
'the acquisition of the next buffer is done in parallel.
'The sample uses a pool of buffers. The buffers are automatically allocated. Once a buffer is filled
'and ready for processing, the buffer is retrieved from the stream grabber as part of a grab
'result. The grab result is processed and the buffer is passed back to the stream grabber by
'disposing the grab result. The buffer is reused and refilled.
'A buffer retrieved from the stream grabber as a grab result is not overwritten in the background
'as long as the grab result is not disposed.

Imports Basler.Pylon

Module Grab
    Public Class Grab

        Shared Sub Main()

            ' The exit code of the sample application.
            Dim exitCode As Integer = 0

            Try

                ' Create a camera object that selects the first camera device found.
                ' More constructors are available for selecting a specific camera device.
                Using camera As New Camera()
                    ' Print the model name of the camera.
                    Console.WriteLine("Using camera {0}.", camera.CameraInfo(CameraInfoKey.ModelName))

                    ' Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    AddHandler camera.CameraOpened, AddressOf Configuration.AcquireContinuous

                    ' Open the connection to the camera device.
                    camera.Open()

                    ' The parameter MaxNumBuffer can be used to control the number of buffers
                    ' allocated for grabbing. The default value of this parameter is 10.
                    camera.Parameters(PLCameraInstance.MaxNumBuffer).SetValue(5)

                    ' Start grabbing.
                    camera.StreamGrabber.Start()

                    ' Grab a number of images.
                    Dim i As Integer = 0
                    For i = 1 To 10
                        ' Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                        Dim grabResult As IGrabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException)
                        Using grabResult
                            ' Image grabbed successfully?
                            If grabResult.GrabSucceeded Then
                                ' Access the image data.
                                Console.WriteLine("SizeX: {0}", grabResult.Width)
                                Console.WriteLine("SizeY: {0}", grabResult.Height)
                                Dim buffer As Byte() = TryCast(grabResult.PixelData, Byte())
                                Console.WriteLine("Gray value of first pixel: {0}", buffer(0))
                                Console.WriteLine("")

                                ' Display the grabbed image.
                                ImageWindow.DisplayImage(0, grabResult)
                            Else
                                Console.WriteLine("Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription)
                            End If
                        End Using
                    Next

                    ' Stop grabbing.
                    camera.StreamGrabber.Stop()

                    ' Close the connection to the camera device.
                    camera.Close()
                End Using

            Catch e As Exception
                Console.Error.WriteLine("Exception: {0}", e.Message)
                exitCode = 1

            Finally
                ' Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine("Press enter to exit.")
                Console.ReadLine()

            End Try

            Environment.Exit(exitCode)

        End Sub

    End Class
End Module

Grab Using Events for USB or GigE Camera#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    Basler USB3 Vision and GigE Vision cameras can send event messages. For example, when a sensor
    exposure has finished, the camera can send an Exposure End event to the computer. The event
    can reach the computer before the image data for the finished exposure has been
    transferred completely. This sample illustrates how to get notified when camera event message data
    has been received.

    The event messages are retrieved automatically and processed by the Camera classes.
    The information contained in event messages is exposed as parameter nodes in the camera node map
    and can be accessed like "normal" camera parameters. These nodes are updated
    when a camera event is received. You can register camera event handler objects that are
    triggered when event data has been received.

    The handler object provides access to the changed parameter but not its source (camera).
    In this sample, we solve the problem with a derived camera class in combination with a handler object as a member.

    These mechanisms are demonstrated for the Exposure End event.
    The Exposure End event carries the following information:
    * EventExposureEndFrameID (USB) / ExposureEndEventFrameID (GigE): Indicates the number of the image frame that has been exposed.
    * EventExposureEndTimestamp (USB) / ExposureEndEventTimestamp (GigE): Indicates the moment when the event was generated.
    This sample shows how to register event handlers that indicate the arrival of events
    sent by the camera. For demonstration purposes, several different handlers are registered
    for the same event.
*/

using System;
using Basler.Pylon;

namespace Grab_CameraEvents
{
    class EventCamera : Camera
    {
        private static Version Sfnc2_0_0 = new Version(2, 0, 0);

        private IntegerName exposureEndDataName;
        private IntegerName exposureEndFrameID;
        private IntegerName exposureEndTimestamp;

        // This IntegerName can be used for GigE as well as for USB cameras.
        public IntegerName ExposureEndDataName
        {
            get
            {
                return this.exposureEndDataName;
            }
        }

        // This IntegerName selects the frame ID and can be used for GigE as well as for USB cameras.
        public IntegerName ExposureEndFrameID
        {
            get
            {
                return this.exposureEndFrameID;
            }
        }

        // This IntegerName selects the timestamp and can be used for GigE as well as for USB cameras.
        public IntegerName ExposureEndTimestamp
        {
            get
            {
                return this.exposureEndTimestamp;
            }
        }

        public EventCamera()
            : base()
        {
        }


        // Configure camera for event trigger and register exposure end event handler.
        public bool Configure()
        {
            // In this sample, a software trigger is used to demonstrate synchronous processing of the grab results.
            // If you want to react to an event as quickly as possible, you have to use Configuration.AcquireContinuous.
            CameraOpened += Configuration.SoftwareTrigger;

            if (Parameters[PLCameraInstance.GrabCameraEvents].IsWritable)
            {
                Parameters[PLCameraInstance.GrabCameraEvents].SetValue( true );
            }
            else
            {
                throw new Exception( "Cannot enable GrabCameraEvents." );
            }

            if (base.Open( 1000, TimeoutHandling.Return ))
            {
                // Check whether the camera supports waiting for the frame trigger ready signal.
                if (!base.CanWaitForFrameTriggerReady)
                {
                    throw new Exception( "This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger." );
                }

                // Features, e.g., 'ExposureEnd', are named according to the GenICam Standard Feature Naming Convention (SFNC).
                // The SFNC defines a common set of features, their behavior, and the related parameter names.
                // This ensures the interoperability of cameras from different camera vendors.
                // Current Basler USB ace and all Basler ace 2 camera models are based on SFNC version 2.0.
                // Older Basler GigE cameras are based on previous SFNC versions.
                // Accordingly, the behavior of these cameras and some parameter names will be different.
                // The SFNC version can be used to handle differences between camera device models.
                if (this.GetSfncVersion() < Sfnc2_0_0)
                {
                    // The naming convention for ExposureEnd differs between SFNC 2.0 and previous versions.
                    exposureEndDataName = PLGigECamera.ExposureEndEventTimestamp;
                    exposureEndFrameID = PLGigECamera.ExposureEndEventFrameID;
                    exposureEndTimestamp = PLGigECamera.ExposureEndEventTimestamp;
                }
                else // For SFNC 2.0 cameras, e.g. USB3 Vision cameras
                {
                    exposureEndDataName = PLUsbCamera.EventExposureEnd;
                    exposureEndFrameID = PLUsbCamera.EventExposureEndFrameID;
                    exposureEndTimestamp = PLUsbCamera.EventExposureEndTimestamp;
                }

                // Check if the device supports events.
                if (Parameters[PLCamera.EventSelector].CanSetValue( PLCamera.EventSelector.ExposureEnd ) == false)
                {
                    throw new Exception( "The device doesn't support the Exposure End event." );
                }

                // Add a callback function to receive the changed FrameID value.
                Parameters[exposureEndDataName].ParameterChanged += OnEventExposureEndData;
                // Enable sending of Exposure End events.
                // Select the event to receive.
                Parameters[PLCamera.EventSelector].SetValue( PLCamera.EventSelector.ExposureEnd );
                // Enable it.
                Parameters[PLCamera.EventNotification].SetValue( PLCamera.EventNotification.On );
            }
            return true;
        }

        // Event handler for exposure end. Only very short processing tasks should be performed by this method.
        // Otherwise, the event notification will block the processing of images.
        public void OnEventExposureEndData( Object sender, ParameterChangedEventArgs e )
        {
            if (Parameters[exposureEndFrameID].IsReadable && Parameters[exposureEndTimestamp].IsReadable)
            {
                Console.WriteLine( "OnEventExposureEndData: Camera: {0} EventArgs {1} FrameID {2} TimeStamp {3}"
                        , CameraInfo[CameraInfoKey.ModelName]
                        , e.Parameter.ToString()
                        , Parameters[exposureEndFrameID].ToString()
                        , Parameters[exposureEndTimestamp].ToString() );
            }
        }
    }

    class Grab_CameraEvent
    {
        internal static void Main()
        {
            const int c_countOfImagesToGrab = 10;
            int exitCode = 0;

            try
            {
                // Create a camera object and select the first camera device found.
                using (EventCamera eventCamera = new EventCamera())
                {
                    // Register the ExposureEnd event with the event handler member.
                    eventCamera.Configure();

                    // Register an event handler using an anonymous method. Keep a reference to the handler if you want to unregister it later.
                    EventHandler<ParameterChangedEventArgs> handlerTimestamp = (s, e) =>
                    {
                        Console.WriteLine("Anonymous method: TimeStamp {0}", e.Parameter.ToString());
                    };

                    eventCamera.Parameters[eventCamera.ExposureEndTimestamp].ParameterChanged += handlerTimestamp;

                    eventCamera.StreamGrabber.Start( c_countOfImagesToGrab );
                    while (eventCamera.StreamGrabber.IsGrabbing)
                    {
                        if (eventCamera.WaitForFrameTriggerReady( 1000, TimeoutHandling.ThrowException ))
                        {
                            eventCamera.ExecuteSoftwareTrigger();
                        }
                        // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                        IGrabResult grabResult = eventCamera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                        using (grabResult)
                        {
                            // Image grabbed successfully?
                            if (grabResult.GrabSucceeded)
                            {
                                ImageWindow.DisplayImage( 0, grabResult );
                            }
                            else
                            {
                                Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                            }
                        }
                    }
                    // If events are not required anymore, you should unregister the event handlers.
                    eventCamera.Parameters[eventCamera.ExposureEndDataName].ParameterChanged -= eventCamera.OnEventExposureEndData;
                    eventCamera.Parameters[eventCamera.ExposureEndTimestamp].ParameterChanged -= handlerTimestamp;
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
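
The sample unregisters the event handlers when they are no longer needed. In addition, the camera can be told to stop sending Exposure End events by switching the event notification off again. The following sketch mirrors the configuration code above and assumes that the EventNotification.Off value is available for the device.

// Sketch: disable Exposure End event notification when events are no longer needed.
eventCamera.Parameters[PLCamera.EventSelector].SetValue( PLCamera.EventSelector.ExposureEnd );
eventCamera.Parameters[PLCamera.EventNotification].SetValue( PLCamera.EventNotification.Off );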

Grab Chunk Image#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    Basler cameras provide chunk features. This means that the cameras can generate additional information about each image,
    e.g. frame counters, time stamps, and CRC checksums. The information is appended to the image data as data "chunks".
    This sample illustrates how to enable chunk features, how to grab images, and how to process the appended
    data. When the camera is in chunk mode, it transfers data blocks that are partitioned into chunks. The first
    chunk is always the image data. When chunk features are enabled, the image data chunk is followed by chunks
    containing the information generated by the chunk features.
*/

using System;
using System.Threading;
using Basler.Pylon;

namespace Grab_ChunkImage
{

    class Grab_ChunkImage
    {
        internal static void Main()
        {
            // The number of images to grab.
            const int c_countOfImagesToGrab = 10;

            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    // Open the connection to the camera device.
                    camera.Open();

                    // Enable the chunk mode.
                    if (!camera.Parameters[PLCamera.ChunkModeActive].TrySetValue( true ))
                    {
                        throw new Exception( "The camera doesn't support chunk features" );
                    }

                    // Enable time stamp chunks.
                    camera.Parameters[PLCamera.ChunkSelector].SetValue( PLCamera.ChunkSelector.Timestamp );
                    camera.Parameters[PLCamera.ChunkEnable].SetValue( true );

                    // Enable frame counter chunk if possible.
                    // Applies to cameras based on SFNC 1.x.
                    if (camera.Parameters[PLCamera.ChunkSelector].TrySetValue( PLCamera.ChunkSelector.Framecounter ))
                    {
                        camera.Parameters[PLCamera.ChunkEnable].SetValue( true );
                    }
                    // Enable Frame ID chunk if possible.
                    // Applies to cameras based on SFNC 2.0 or later.
                    else if (camera.Parameters[PLCamera.ChunkSelector].TrySetValue( PLCamera.ChunkSelector.FrameID ))
                    {
                        camera.Parameters[PLCamera.ChunkEnable].SetValue( true );
                    }
                    // Enable generic counters if possible (USB camera devices).
                    else if (camera.Parameters[PLCamera.ChunkSelector].TrySetValue( PLCamera.ChunkSelector.CounterValue ))
                    {
                        camera.Parameters[PLCamera.ChunkEnable].SetValue( true );
                        camera.Parameters[PLCamera.CounterSelector].SetValue( PLCamera.CounterSelector.Counter1 );
                        camera.Parameters[PLCamera.CounterEventSource].SetValue( PLCamera.CounterEventSource.FrameStart );
                    }

                    // Enable CRC checksum chunks.
                    camera.Parameters[PLCamera.ChunkSelector].SetValue( PLCamera.ChunkSelector.PayloadCRC16 );
                    camera.Parameters[PLCamera.ChunkEnable].SetValue( true );


                    // Start grabbing c_countOfImagesToGrab images.
                    camera.StreamGrabber.Start( c_countOfImagesToGrab );

                    // camera.StreamGrabber.Stop() is called automatically by the RetrieveResult() method
                    // when c_countOfImagesToGrab images have been retrieved.
                    while (camera.StreamGrabber.IsGrabbing)
                    {
                        // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                        IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                        using (grabResult)
                        {
                            // Image grabbed successfully?
                            if (grabResult.GrabSucceeded)
                            {

                                // Display the grabbed image.
                                ImageWindow.DisplayImage( 0, grabResult );

                                // Check to see if a buffer containing chunk data has been received.
                                if (PayloadType.ChunkData != grabResult.PayloadTypeValue)
                                {
                                    throw new Exception( "Unexpected payload type received." );
                                }

                                // Because we have enabled the CRC Checksum feature, we can check
                                // the integrity of the buffer.
                                // Note: Enabling the CRC Checksum feature is not a prerequisite for using chunks.
                                // Chunks can also be handled when the CRC Checksum feature is disabled.
                                if (grabResult.HasCRC && grabResult.CheckCRC() == false)
                                {
                                    throw new Exception( "Image was damaged!" );
                                }

                                // Access the chunk data attached to the result.
                                // Before accessing the chunk data, you should check to see
                                // if the chunk is readable. If it is readable, the buffer
                                // contains the requested chunk data.
                                if (grabResult.ChunkData[PLChunkData.ChunkTimestamp].IsReadable)
                                {
                                    Console.WriteLine( "ChunkData: TimeStamp = {0}", grabResult.ChunkData[PLChunkData.ChunkTimestamp] );
                                }
                                else
                                {
                                    Console.WriteLine( "ChunkData: No TimeStamp" );
                                }

                                // Print the frame counter value (SFNC 1.x).
                                if (grabResult.ChunkData[PLChunkData.ChunkFramecounter].IsReadable)
                                {
                                    Console.WriteLine( "ChunkData: FrameCounter = {0}", grabResult.ChunkData[PLChunkData.ChunkFramecounter] );
                                }
                                // Print the Frame ID value (SFNC 2.0 or later).
                                else if (grabResult.ChunkData[PLChunkData.ChunkFrameID].IsReadable)
                                {
                                    Console.WriteLine( "ChunkData: FrameID = {0}", grabResult.ChunkData[PLChunkData.ChunkFrameID] );
                                }
                                // Print the generic counter value (USB camera devices).
                                else if (grabResult.ChunkData[PLChunkData.ChunkCounterSelector].TrySetValue( PLChunkData.ChunkCounterSelector.Counter1 )
                                      && grabResult.ChunkData[PLChunkData.ChunkCounterValue].IsReadable
                                    )
                                {
                                    Console.WriteLine( "ChunkData: FrameCounter = {0}", grabResult.ChunkData[PLChunkData.ChunkCounterValue] );
                                }

                                Console.WriteLine( "" );
                            }
                            else
                            {
                                Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                            }
                        }
                    }
                    camera.Parameters[PLCamera.ChunkModeActive].SetValue( false );
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
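
In the sample, the chunk parameters are printed directly. If the values are needed as numbers for further processing, the integer chunk parameters can be read with GetValue(). This is a sketch, not part of the original sample; it assumes that the ChunkData collection returns integer parameters for these names, as the parameter collections elsewhere in these samples do.

// Sketch: read a chunk value into a variable instead of printing the parameter object.
// Check IsReadable first, as shown in the sample above.
if (grabResult.ChunkData[PLChunkData.ChunkTimestamp].IsReadable)
{
    long timestamp = grabResult.ChunkData[PLChunkData.ChunkTimestamp].GetValue();
    Console.WriteLine( "Timestamp as a number: {0}", timestamp );
}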

Grab Multicast#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample demonstrates how to open a camera in multicast mode
    and how to receive a multicast stream.

    Two instances of this application must be started on different computers.
    The first application started on PC A acts as the controlling application and has full access to the GigE camera.
    The second instance started on PC B opens the camera in monitor mode.
    This instance is not able to control the camera but can receive multicast streams.

    To get the sample running, start this application first on PC A in control mode.
    After PC A has begun to receive frames, start the second instance of this
    application on PC B in monitor mode.
*/

using System;
using Basler.Pylon;

namespace Grab_MultiCast
{
    class GrabMultiCast
    {
        const UInt32 countOfImagesToGrab = 100;

        // OnImageGrabbed is used to print image information such as Width and Height.
        // It can be extended to implement other functionality for the image grabbed event.
        static void OnImageGrabbed( Object sender, ImageGrabbedEventArgs e )
        {
            if (e.GrabResult.GrabSucceeded)
            {
                Console.WriteLine( "SizeX:{0}", e.GrabResult.Width );
                Console.WriteLine( "SizeY:{0}", e.GrabResult.Height );
                byte[] pixelData = (byte[])e.GrabResult.PixelData;
                Console.WriteLine( "Gray value of first pixel:{0}", pixelData[0] );
            }
            else
            {
                Console.WriteLine( "Error Code: {0} Error Description: {1}", e.GrabResult.ErrorCode, e.GrabResult.ErrorDescription );
            }
        }


        // This method reports images that have been skipped during image acquisition.
        // It is registered with the ImageGrabbed event and prints a message only if
        // one or more images have been skipped.
        static void OnImageSkipped( Object sender, ImageGrabbedEventArgs e )
        {
            if (e.GrabResult.SkippedImageCount > 0)
            {
                Console.WriteLine( "OnImageSkipped Event" );
                Console.WriteLine( "Number of skipped images: {0}", e.GrabResult.SkippedImageCount );
            }
        }

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                // For multicast, only GigE cameras are considered here.
                using (Camera camera = new Camera( DeviceType.GigE, CameraSelectionStrategy.FirstFound ))
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );
                    String deviceType = camera.CameraInfo[CameraInfoKey.DeviceType];

                    Console.WriteLine( "==========" );
                    Console.WriteLine( "{0} Camera", deviceType );
                    Console.WriteLine( "==========" );
                    camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;
                    camera.StreamGrabber.ImageGrabbed += OnImageSkipped;
                    // Get the key from the user to select the mode.

                    Console.Write( "Start multicast sample in (c)ontrol or in (m)onitor mode? (c/m) " );
                    ConsoleKeyInfo keyPressed = Console.ReadKey();
                    switch (keyPressed.KeyChar)
                    {
                        // The default configuration must be removed when monitor mode is selected
                        // because the monitoring application is not allowed to modify any parameter settings.
                        case 'm':
                        case 'M':
                            // Monitor mode selected.
                            Console.WriteLine( "\nIn Monitor mode" );

                            // Set MonitorModeActive to true to act as monitor
                            camera.Parameters[PLCameraInstance.MonitorModeActive].SetValue( true );// Set monitor mode

                            // Open the camera.
                            camera.Open();

                            // Select transmission type. If the camera is already controlled by another application
                            // and configured for multicast, the active camera configuration can be used
                            // (IP Address and Port will be set automatically).
                            camera.Parameters[PLGigEStream.TransmissionType].TrySetValue( PLGigEStream.TransmissionType.UseCameraConfig );

                            // Alternatively, the stream grabber could be explicitly set to "multicast"...
                            // In this case, the IP Address and the IP port must also be set.
                            //
                            //camera.Parameters[PLGigEStream.TransmissionType].SetValue(PLGigEStream.TransmissionType.Multicast);
                            //camera.Parameters[PLGigEStream.DestinationAddr].SetValue("239.0.0.1");
                            //camera.Parameters[PLGigEStream.DestinationPort].SetValue(49152);

                            if ((camera.Parameters[PLGigEStream.DestinationAddr].GetValue() != "0.0.0.0") &&
                                 (camera.Parameters[PLGigEStream.DestinationPort].GetValue() != 0))
                            {
                                camera.StreamGrabber.Start( countOfImagesToGrab );
                            }
                            else
                            {
                                throw new Exception( "Failed to open stream grabber (monitor mode): The acquisition is not yet started by the controlling application. Start the controlling application before starting the monitor application." );
                            }
                            break;

                        case 'c':
                        case 'C':
                            // Controlling mode selected.
                            Console.WriteLine( "\nIn Control mode" );

                            // Open the camera.
                            camera.Open();

                            // Set transmission type to "multicast"...
                            // In this case, the IP Address and the IP port must also be set.
                            camera.Parameters[PLGigEStream.TransmissionType].SetValue( PLGigEStream.TransmissionType.Multicast );
                            //camera.Parameters[PLGigEStream.DestinationAddr].SetValue("239.0.0.1");
                            //camera.Parameters[PLGigEStream.DestinationPort].SetValue(49152);

                            // Maximize the image area of interest (Image AOI).
                            camera.Parameters[PLGigECamera.OffsetX].TrySetValue( camera.Parameters[PLGigECamera.OffsetX].GetMinimum() );
                            camera.Parameters[PLGigECamera.OffsetY].TrySetValue( camera.Parameters[PLGigECamera.OffsetY].GetMinimum() );
                            camera.Parameters[PLGigECamera.Width].SetValue( camera.Parameters[PLGigECamera.Width].GetMaximum() );
                            camera.Parameters[PLGigECamera.Height].SetValue( camera.Parameters[PLGigECamera.Height].GetMaximum() );

                            // Set the pixel data format.
                            camera.Parameters[PLGigECamera.PixelFormat].SetValue( PLGigECamera.PixelFormat.Mono8 );

                            camera.StreamGrabber.Start();
                            break;

                        default:
                            throw new NotSupportedException( "Invalid mode selected." );
                    }

                    IGrabResult grabResult;

                    // camera.StreamGrabber.Stop() is called automatically by the RetrieveResult() method
                    // when countOfImagesToGrab images have been retrieved in monitor mode
                    // or when a key is pressed and the camera object is destroyed.
                    Console.WriteLine( "Press any key to quit FrameGrabber..." );

                    while (!Console.KeyAvailable && camera.StreamGrabber.IsGrabbing)
                    {
                        grabResult = camera.StreamGrabber.RetrieveResult( 5000, TimeoutHandling.ThrowException );
                        using (grabResult)
                        {
                            // Image grabbed successfully?
                            if (grabResult.GrabSucceeded)
                            {
                                // Display the image
                                ImageWindow.DisplayImage( 1, grabResult );

                                // The grab result could now be processed here.
                            }
                            else
                            {
                                Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                            }
                        }
                    }

                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling
                Console.Error.WriteLine( "\nException: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
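
In control mode, the sample relies on the default multicast destination address and port. The commented-out lines in the sample show how to set them explicitly; the following sketch combines them. The address 239.0.0.1 and port 49152 are the example values from the sample, not recommendations for a specific network.

// Sketch: explicit multicast configuration in control mode.
camera.Parameters[PLGigEStream.TransmissionType].SetValue( PLGigEStream.TransmissionType.Multicast );
camera.Parameters[PLGigEStream.DestinationAddr].SetValue( "239.0.0.1" );
camera.Parameters[PLGigEStream.DestinationPort].SetValue( 49152 );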

Grab Strategies#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample shows the use of the Instant Camera grab strategies.
*/

using System;
using Basler.Pylon;

namespace Grab_Strategies
{
    class Grab_Strategies
    {
        // OnImageGrabbed is used to print image information such as Width and Height.
        // It can be extended to implement other functionality for the image grabbed event.
        static void OnImageGrabbed( Object sender, ImageGrabbedEventArgs e )
        {
            if (e.GrabResult.GrabSucceeded)
            {
                Console.WriteLine( "SizeX:{0}", e.GrabResult.Width );
                Console.WriteLine( "SizeY:{0}", e.GrabResult.Height );
                byte[] pixelData = (byte[])e.GrabResult.PixelData;
                Console.WriteLine( "Gray value of first pixel:{0}", pixelData[0] );
            }
            else
            {
                Console.WriteLine( "Error Code: {0} Error Description: {1}", e.GrabResult.ErrorCode, e.GrabResult.ErrorDescription );
            }
        }


        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Camera event processing must be activated first; it is deactivated by default.
                    camera.Parameters[PLCameraInstance.GrabCameraEvents].SetValue( true );

                    // Change default configuration to enable software triggering.
                    camera.CameraOpened += Configuration.SoftwareTrigger;
                    // Register image grabbed event to print frame info
                    camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;

                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    IGrabResult result;
                    int nBuffersInQueue = 0;

                    // Open the connection to the camera device.
                    camera.Open();

                    // The MaxNumBuffer parameter can be used to control the number of buffers
                    // allocated for grabbing. The default value of this parameter is 10.
                    camera.Parameters[PLStream.MaxNumBuffer].SetValue( 15 );

                    // Can the camera device be queried whether it is ready to accept the next frame trigger?
                    if (camera.CanWaitForFrameTriggerReady)
                    {
                        Console.WriteLine( "Grab using the GrabStrategy.OneByOne default strategy:" );

                        // The GrabStrategy.OneByOne strategy is used. The images are processed
                        // in the order of their arrival.
                        camera.StreamGrabber.Start( GrabStrategy.OneByOne, GrabLoop.ProvidedByUser );

                        // In the background, the grab engine thread retrieves the
                        // image data and queues the buffers into the internal output queue.

                        // Issue software triggers. For each call, wait up to 1000 ms until the camera is ready for triggering the next image.
                        for (int i = 0; i < 3; i++)
                        {
                            if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling.ThrowException ))
                            {
                                camera.ExecuteSoftwareTrigger();
                            }
                        }

                        // For demonstration purposes, wait for the last image to appear in the output queue.
                        System.Threading.Thread.Sleep( 3 * 1000 );

                        // Check that grab results are waiting.
                        if (camera.StreamGrabber.GrabResultWaitHandle.WaitOne( 0 ))
                        {
                            Console.WriteLine( "Grab results wait in the output queue." );
                        }

                        // All triggered images are still waiting in the output queue
                        // and are now retrieved.
                        // The grabbing continues in the background, e.g. when using hardware trigger mode,
                        // as long as the grab engine does not run out of buffers.
                        for (; ; )
                        {
                            result = camera.StreamGrabber.RetrieveResult( 0, TimeoutHandling.Return );
                            if (result != null)
                            {
                                using (result)
                                {
                                    nBuffersInQueue++;
                                }
                            }
                            else
                                break;
                        }

                        Console.WriteLine( "Retrieved {0} grab results from output queue.", nBuffersInQueue );

                        //Stop the grabbing.
                        camera.StreamGrabber.Stop();

                        Console.WriteLine( "Grab using strategy GrabStrategy.LatestImages" );

                        // The GrabStrategy.LatestImages strategy is used. The images are processed
                        // in the order of their arrival, but only the most recently received images
                        // are kept in the output queue.

                        // The size of the output queue can be adjusted.
                        // When using this strategy the OutputQueueSize parameter can be changed during grabbing.
                        camera.Parameters[PLCameraInstance.OutputQueueSize].SetValue( 2 );

                        camera.StreamGrabber.Start( GrabStrategy.LatestImages, GrabLoop.ProvidedByUser );

                        // Execute the software trigger, wait actively until the camera accepts the next frame trigger or until the timeout occurs.
                        for (int i = 0; i < 3; ++i)
                        {
                            if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling.ThrowException ))
                            {
                                camera.ExecuteSoftwareTrigger();
                            }
                        }

                        // Wait for all images.
                        System.Threading.Thread.Sleep( 3 * 1000 );

                        // Check whether the grab results are waiting.
                        if (camera.StreamGrabber.GrabResultWaitHandle.WaitOne( 0 ))
                        {
                            Console.WriteLine( "Grab results wait in the output queue." );
                        }

                        // Only the images received last are waiting in the internal output queue
                        // and are now retrieved.
                        // The grabbing continues in the background, e.g. when using the hardware trigger mode.
                        nBuffersInQueue = 0;
                        for (; ; )
                        {
                            result = camera.StreamGrabber.RetrieveResult( 0, TimeoutHandling.Return );
                            if (result != null)
                            {
                                using (result)
                                {
                                    if (result.SkippedImageCount > 0)
                                    {
                                        Console.WriteLine( "Skipped {0} images.", result.SkippedImageCount );
                                    }
                                    nBuffersInQueue++;
                                }
                            }
                            else
                                break;
                        }

                        Console.WriteLine( "Retrieved {0} grab results from output queue.", nBuffersInQueue );

                        // When setting the output queue size to 1, this strategy is equivalent to the GrabStrategy_LatestImageOnly grab strategy of the C++ API.
                        camera.Parameters[PLCameraInstance.OutputQueueSize].SetValue( 1 );

                        // When setting the output queue size to the MaxNumBuffer value, this strategy is equivalent to GrabStrategy.OneByOne.
                        camera.Parameters[PLCameraInstance.OutputQueueSize].SetValue( camera.Parameters[PLStream.MaxNumBuffer].GetValue() );

                        //Stop the grabbing.
                        camera.StreamGrabber.Stop();
                    }
                    else
                    {
                        // See the documentation of Camera.CanWaitForFrameTriggerReady for more information.
                        Console.WriteLine( "This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger." );
                    }
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
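
The sample registers an ImageGrabbed handler but drives the grab loop itself by calling RetrieveResult() (GrabLoop.ProvidedByUser). Alternatively, the stream grabber can run the grab loop in a background thread and deliver the results through the ImageGrabbed event. The following is a minimal sketch; it assumes GrabLoop.ProvidedByStreamGrabber is available as the other value of the GrabLoop enumeration.

// Sketch: let the stream grabber provide the grab loop thread.
// Grab results are then delivered via the ImageGrabbed event (see OnImageGrabbed above),
// so the application does not call RetrieveResult() itself.
camera.StreamGrabber.Start( GrabStrategy.OneByOne, GrabLoop.ProvidedByStreamGrabber );

// Issue a few software triggers as in the sample.
for (int i = 0; i < 3; i++)
{
    if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling.ThrowException ))
    {
        camera.ExecuteSoftwareTrigger();
    }
}

// Give the background grab loop time to process the triggered images before stopping.
System.Threading.Thread.Sleep( 1000 );
camera.StreamGrabber.Stop();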

Grab Using Action Command for Multiple GigE Cameras#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample shows how to issue a GigE Vision action command to multiple cameras.
    By using an action command, multiple cameras can be triggered at the same time,
    as opposed to software triggering, where each camera has to be triggered individually.

    To make the execution and configuration of multiple cameras easier, this sample uses the ActionCommandTrigger class.
*/

using System;
using Basler.Pylon;
using System.Collections.Generic;

namespace Grab_UsingActionCommand
{
    class Grab_UsingActionCommand
    {
        /////////////////////////////////////////////////////////////////
        // Limits the number of cameras used for grabbing.
        // It is important to manage the available bandwidth when grabbing with multiple
        // cameras. This applies, for instance, if two GigE cameras are connected to the
        // same network adapter via a switch.
        // To avoid potential bandwidth problems, it's possible to optimize the
        // transport layer using the pylon Viewer Bandwidth Manager.
        const int c_maxCamerasToUse = 2;


        internal static void Main()
        {
            int exitCode = 0;
            List<Camera> cameras = new List<Camera>();

            try
            {
                // Ask the camera finder for a list of all GigE camera devices.
                // Note that this sample only works with GigE camera devices.
                List<ICameraInfo> allDeviceInfos = CameraFinder.Enumerate(DeviceType.GigE);

                if (allDeviceInfos.Count == 0)
                {
                    throw new ApplicationException( "No GigE cameras present." );
                }

                // Open all cameras to fulfill preconditions for Configure(ICamera())
                allDeviceInfos.ForEach( cameraInfo => cameras.Add( new Camera( cameraInfo ) ) );
                cameras.ForEach( camera => camera.Open() );

                // Prepare all cameras for action commands
                ActionCommandTrigger actionCommandTrigger = new ActionCommandTrigger();

                // Configure all cameras to wait for the action command. If a camera doesn't support action commands, an exception will be thrown.
                actionCommandTrigger.Configure( cameras.ToArray() );

                // Start grabbing on all cameras.
                // The cameras won't transmit any image data because they are configured to wait for an action command.
                cameras.ForEach( camera => camera.StreamGrabber.Start() );

                // Now we issue the action command to all devices without any DeviceKey, GroupKey, or GroupMask
                // because Configure(ICamera()) had already set these parameters.
                actionCommandTrigger.Issue();

                // Retrieve images from all cameras.
                foreach (Camera camera in cameras)
                {
                    // Camera will return grab results in the order they arrive.
                    IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                    using (grabResult)
                    {
                        // Image grabbed successfully?
                        if (grabResult.GrabSucceeded)
                        {
                            // Print the model name and the IP address of the camera.
                            Console.WriteLine( "Image grabbed successfully for: {0} ({1})",
                                camera.CameraInfo.GetValueOrDefault( CameraInfoKey.FriendlyName, null ),
                                camera.CameraInfo.GetValueOrDefault( CameraInfoKey.DeviceIpAddress, null ) );
                        }
                        else
                        {
                            // If a buffer hasn't been grabbed completely, the network bandwidth is possibly insufficient for transferring
                            // multiple images simultaneously. See note above c_maxCamerasToUse.
                            Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                        }
                    }
                }
                // To avoid overtriggering, you should call cameras[0].WaitForFrameTriggerReady
                // (see Grab_UsingGrabLoopThread sample for details).
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Stop stream grabber and close all cameras.
                cameras.ForEach( camera => { camera.StreamGrabber.Stop(); camera.Close(); camera.Dispose(); } );
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
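
The listing above declares c_maxCamerasToUse but then opens every GigE device that was found. A minimal sketch of applying that limit before opening the cameras, reusing the allDeviceInfos and cameras variables from the listing (this replaces the two ForEach calls shown above and is not part of the SDK sample):

// Sketch only: open at most c_maxCamerasToUse of the enumerated devices.
int camerasToOpen = Math.Min( allDeviceInfos.Count, c_maxCamerasToUse );
for (int i = 0; i < camerasToOpen; i++)
{
    cameras.Add( new Camera( allDeviceInfos[i] ) );
}
cameras.ForEach( camera => camera.Open() );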

Grab Using Buffer Factory#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

   This sample demonstrates how to use a user-provided buffer factory.
   Using a buffer factory is optional and intended for advanced use cases only.
   A buffer factory is only necessary if you want to grab into externally supplied buffers.
*/

using System;
using System.Runtime.InteropServices;
using Basler.Pylon;

namespace Grab_UsingBufferFactory
{

    // A user-provided buffer factory.
    class MyBufferFactory : IBufferFactory
    {
        public MyBufferFactory()
        {
        }

        // This function will be called by pylon.NET when it needs to allocate a buffer to store image data.
        // The bufferSize parameter specifies the size in bytes of the buffer to allocate. On return you must
        // set the output parameters createdPinnedBuffer and createdPinnedObject. Optionally you can set
        // bufferUserData. The bufferUserData can later be used to identify the buffer.
        // In case the allocation fails you must throw an exception to indicate the failure.
        // Note: This function may be called from different threads.

        public void AllocateBuffer( long bufferSize, ref object createdPinnedObject, ref IntPtr createdPinnedBuffer, ref object bufferUserData )
        {
            // Allocate a buffer for the pixel data.
            // If you already have a buffer allocated by your image processing library, you can use that buffer here instead.
            // In this case, you must modify the code for freeing the buffer (see FreeBuffer below) accordingly.
            // A sketch of this approach follows the listing below.
            long elementSize = sizeof( ushort );
            long arraySize = (bufferSize + (elementSize - 1)) / elementSize; //Round up if size does not align

            // The pinned object will receive the actual allocated object (in our case the array).
            // This information can be retrieved from a grab result by calling
            // grabResult.PixelData;
            createdPinnedObject = new ushort[(int)(arraySize)]; // ATTENTION: in .NET array indexes are always int!!!

            // The pinned buffer will receive the pointer to the pinned memory location
            // that will be used for image data acquisition internally.
            GCHandle handle = GCHandle.Alloc( createdPinnedObject, GCHandleType.Pinned );
            createdPinnedBuffer = handle.AddrOfPinnedObject();

            // Here we store the GCHandle in the buffer user data to be able to free the
            // buffer in FreeBuffer.
            bufferUserData = handle;

            Console.WriteLine( "Created buffer {0}.", createdPinnedBuffer );
        }

        // Frees a buffer allocated by a previous call to AllocateBuffer.
        // Warning: This method can be called by different threads.
        public void FreeBuffer( object createdPinnedObject, IntPtr createdPinnedBuffer, object bufferUserData )
        {
            if (null == bufferUserData)
            {
                return;
            }

            // We used the buffer user data to store the buffer handle.
            // Now we use this buffer handle to free/unpin.
            // Note: GCHandle is a value type, so check IsAllocated instead of comparing with null.
            GCHandle handle = (GCHandle)bufferUserData;
            if (!handle.IsAllocated)
            {
                return;
            }

            Console.WriteLine( "Freed buffer {0}.", handle.AddrOfPinnedObject() );

            handle.Free();
        }
    }

    class Grab_UsingBufferFactory
    {
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    // Open the connection to the camera device.
                    camera.Open();

                    // Set buffer factory before starting the stream grabber because allocation
                    // happens there.
                    MyBufferFactory myFactory = new MyBufferFactory();
                    camera.StreamGrabber.BufferFactory = myFactory;

                    // Start grabbing.
                    camera.StreamGrabber.Start();

                    // Grab a number of images.
                    for (int i = 0; i < 10; ++i)
                    {
                        // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                        IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                        using (grabResult)
                        {
                            // Image grabbed successfully?
                            if (grabResult.GrabSucceeded)
                            {
                                // Access the image data.
                                Console.WriteLine( "SizeX: {0}", grabResult.Width );
                                Console.WriteLine( "SizeY: {0}", grabResult.Height );

                                // Normally we would have a byte array in the pixel data.
                                // However we are using the buffer factory here which allocates
                                // ushort arrays.
                                ushort[] buffer = grabResult.PixelData as ushort[];
                                Console.WriteLine( "First value of pixel data: {0}", buffer[0] );
                                Console.WriteLine( "" );

                                // Display the grabbed image.
                                ImageWindow.DisplayImage( 0, grabResult );
                            }
                            else
                            {
                                Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                            }
                        }
                    }

                    // Stop grabbing.
                    camera.StreamGrabber.Stop();

                    // Close the connection to the camera device.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
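
If the image data should end up in buffers that were allocated elsewhere, as hinted at in the AllocateBuffer comments above, the factory can hand out preallocated arrays instead of creating a new one for every request. The following is a minimal sketch under that assumption; the PooledBufferFactory class and its pooling strategy are illustrations only and not part of the pylon API, while the AllocateBuffer and FreeBuffer signatures are taken from the listing above.

using System;
using System.Collections.Concurrent;
using System.Runtime.InteropServices;
using Basler.Pylon;

// Hypothetical buffer factory that reuses preallocated arrays (sketch only).
class PooledBufferFactory : IBufferFactory
{
    // Arrays that were handed back by FreeBuffer and can be reused.
    private readonly ConcurrentBag<ushort[]> pool = new ConcurrentBag<ushort[]>();

    public void AllocateBuffer( long bufferSize, ref object createdPinnedObject, ref IntPtr createdPinnedBuffer, ref object bufferUserData )
    {
        long elementSize = sizeof( ushort );
        long arraySize = (bufferSize + (elementSize - 1)) / elementSize; // Round up if the size does not align.

        // Reuse a pooled array if it is large enough; otherwise, allocate a new one.
        ushort[] array;
        if (!pool.TryTake( out array ) || array.LongLength < arraySize)
        {
            array = new ushort[(int)arraySize];
        }

        // Pin the array so the image data can be written into it.
        createdPinnedObject = array;
        GCHandle handle = GCHandle.Alloc( array, GCHandleType.Pinned );
        createdPinnedBuffer = handle.AddrOfPinnedObject();
        bufferUserData = handle;
    }

    public void FreeBuffer( object createdPinnedObject, IntPtr createdPinnedBuffer, object bufferUserData )
    {
        // Unpin the memory.
        if (bufferUserData is GCHandle)
        {
            GCHandle handle = (GCHandle)bufferUserData;
            if (handle.IsAllocated)
            {
                handle.Free();
            }
        }

        // Return the array to the pool for reuse instead of discarding it.
        ushort[] array = createdPinnedObject as ushort[];
        if (array != null)
        {
            pool.Add( array );
        }
    }
}

As in the sample above, the factory would be assigned to camera.StreamGrabber.BufferFactory before StreamGrabber.Start() is called.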

Grab Using Exposure End Event#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample shows how to use the Exposure End event to speed up image acquisition.
    For example, when a sensor exposure is finished, the camera can send an Exposure End event to the computer.
    The computer can receive the event before the image data of the finished exposure has been transferred completely.
    This avoids unnecessary delays, e.g., when an image object moves before the related image data transfer is complete.

    Note: This sample shows how to match incoming images using the PLCamera.EventExposureEndFrameID and the GrabResult.BlockID values. For ace 2 camera models, PLCamera.EventExposureEndFrameID and the GrabResult.BlockID don't contain matching values. The GrabResult.BlockID equivalent is the chunk value represented by PLCamera.ChunkSelector.FrameID. Please see the Grab_ChunkImage sample for more information about how to determine the correct chunk value to use instead of GrabResult.BlockID.
*/

using System;
using System.Collections.Generic;
using Basler.Pylon;
using System.Diagnostics;

namespace Grab_UsingExposureEndEvent
{
    // Used for logging received events without printing the information to the screen
    // because printing would change the timing.
    // This class is used for demonstration purposes only.
    internal class LogItem
    {
        private string eventType;
        private long frameNumber;
        private double time;

        public string EventType
        {
            get
            {
                return this.eventType;
            }
        }
        public long FrameNumber
        {
            get
            {
                return this.frameNumber;
            }
        }
        public double Time
        {
            get
            {
                return time;
            }
        }

        // Stores the values in private fields.
        public LogItem( string type, long frameNr )
        {
            eventType = type;
            frameNumber = frameNr;
            time = Stopwatch.GetTimestamp();
        }
    };

    class Grab_UsingExposureEndEvent
    {
        private static Version sfnc2_0_0 = new Version(2, 0, 0);

        private static long nextExpectedFrameNumberImage;
        private static long nextExpectedFrameNumberExposureEnd;
        private static long nextFrameNumberForMove;
        private static bool gevCounterRules;

        private static string eventNotificationOn;
        private static IntegerName exposureEndEventFrameId;

        // Number of images to be grabbed.
        public static long countOfImagesToGrab = 50;
        // Create list of log items.
        public static List<LogItem> logItems = new List<LogItem>();


        private static void Configure( Camera camera )
        {
            // Camera event processing must be enabled first. The default is off.
            // The camera must be closed to do this.
            if (camera.Parameters[PLCameraInstance.GrabCameraEvents].IsWritable)
            {
                camera.Parameters[PLCameraInstance.GrabCameraEvents].SetValue( true );
            }
            else
            {
                throw new Exception( "Can't enable GrabCameraEvents." );
            }

            // Open the camera to configure parameters.
            camera.Open();

            // Check whether the device supports events.
            if (!camera.Parameters[PLCamera.EventSelector].IsWritable)
            {
                throw new Exception( "The device doesn't support events." );
            }

            // GigE cameras don't use frame ID 0.
            gevCounterRules = camera.CameraInfo[CameraInfoKey.TLType] == TLType.GigE;

            if (camera.GetSfncVersion() < sfnc2_0_0)
            {
                // On older cameras, frame IDs start at 1.
                nextExpectedFrameNumberImage = 1;
                nextExpectedFrameNumberExposureEnd = 1;
                nextFrameNumberForMove = 1;

                // The naming of the Exposure End event differs between SFNC 2.0 and previous versions.
                exposureEndEventFrameId = PLCamera.ExposureEndEventFrameID;
                eventNotificationOn = PLCamera.EventNotification.GenICamEvent;

                // Add an event handler that notifies you when an Exposure End event is received.
                // On older cameras, the parameter is called "ExposureEndEventData".
                camera.Parameters["ExposureEndEventData"].ParameterChanged += delegate ( Object sender, ParameterChangedEventArgs e )
                {
                    OnCameraEventExposureEndData( sender, e, camera );
                };
            }
            else
            {
                // On current cameras (using SFNC 2.0, e.g., USB3 Vision cameras),
                // frame IDs start at 0.
                nextExpectedFrameNumberImage = 0;
                nextExpectedFrameNumberExposureEnd = 0;
                nextFrameNumberForMove = 0;

                // The naming of the Exposure End event differs between SFNC 2.0 and previous versions.
                exposureEndEventFrameId = PLCamera.EventExposureEndFrameID;
                eventNotificationOn = PLCamera.EventNotification.On;

                // Add an event handler that notifies you when an Exposure End event is received.
                // On current cameras, the parameter is called "EventExposureEndData".
                camera.Parameters["EventExposureEndData"].ParameterChanged += delegate ( Object sender, ParameterChangedEventArgs e )
                {
                    OnCameraEventExposureEndData( sender, e, camera );
                };

            }

            // Event handler for images received.
            camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;

            // The network packet signaling an event on a GigE camera device may get lost in the network.
            // The following commented parameters can be used to control the handling of lost events.
            //camera.Parameters[PLGigEEventGrabber.Timeout]
            //camera.Parameters[PLGigEEventGrabber.RetryCount]
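            // For example, to wait longer for an event result and retry a few times before
            // reporting it as lost (the values below are placeholders for illustration only;
            // they are not part of the original sample, and the unit of Timeout should be
            // checked in the parameter documentation):
            //camera.Parameters[PLGigEEventGrabber.Timeout].TrySetValue( 10 );
            //camera.Parameters[PLGigEEventGrabber.RetryCount].TrySetValue( 3 );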

            // Enable the sending of Exposure End events.
            // Select the event to be received.
            camera.Parameters[PLCamera.EventSelector].SetValue( PLCamera.EventSelector.ExposureEnd );
            camera.Parameters[PLCamera.EventNotification].SetValue( eventNotificationOn );
        }


        private static void Disable( Camera camera )
        {
            // Disable the sending of Exposure End events.
            camera.Parameters[PLCamera.EventSelector].SetValue( PLCamera.EventSelector.ExposureEnd );
            camera.Parameters[PLCamera.EventNotification].SetValue( PLCamera.EventNotification.Off );
        }


        private static long GetIncrementedFrameNumber( long frameNr )
        {
            frameNr++;

            // GigE cameras use a 16-bit frame counter and will wrap around earlier.
            // They skip the value of 0 and continue with 1.
            if (gevCounterRules && unchecked((UInt16)frameNr) == 0)
            {
                frameNr = 1;
            }

            return frameNr;
        }


        private static void MoveImagedItemOrSensorHead()
        {
            // The imaged item or the sensor head can be moved now...
            // The camera may not be ready yet for a trigger at this point because the sensor is still being read out.
            // See the documentation of the Camera.WaitForFrameTriggerReady() method for more information.
            logItems.Add( new LogItem( "Move", nextFrameNumberForMove ) );
            nextFrameNumberForMove = GetIncrementedFrameNumber( nextFrameNumberForMove );
        }


        private static void OnCameraEventExposureEndData( Object sender, ParameterChangedEventArgs e, Camera camera )
        {
            // An Exposure End event has been received. On GigE camera devices, the frame number in the event data is equal to the grab result's block ID.
            long frameNumber = 0;
            if (camera.Parameters[exposureEndEventFrameId].IsReadable)
            {
                frameNumber = camera.Parameters[exposureEndEventFrameId].GetValue();
            }
            logItems.Add( new LogItem( "ExposureEndEvent", frameNumber ) );

            if (GetIncrementedFrameNumber( frameNumber ) != nextExpectedFrameNumberExposureEnd)
            {
                // Check whether the imaged item or the sensor head can be moved.
                // This will be the case if the Exposure End event has been lost or if the Exposure End event is received later than the image.
                if (frameNumber == nextFrameNumberForMove)
                {
                    MoveImagedItemOrSensorHead();
                }

                // Check for missing events.
                if (frameNumber != nextExpectedFrameNumberExposureEnd)
                {
                    logItems.Add( new LogItem( "An Exposure End event has been lost. Expected frame number is " + nextExpectedFrameNumberExposureEnd + " but got frame number " + frameNumber, frameNumber ) );
                    // Resync.
                    nextExpectedFrameNumberExposureEnd = frameNumber;
                }

                nextExpectedFrameNumberExposureEnd = GetIncrementedFrameNumber( nextExpectedFrameNumberExposureEnd );
            }
        }


        // This handler is called when an image has been received.
        private static void OnImageGrabbed( Object sender, ImageGrabbedEventArgs e )
        {
            // Read frame number from grab result.
            long frameNumber = e.GrabResult.BlockID;
            logItems.Add( new LogItem( "Image Received", frameNumber ) );

            if (frameNumber == nextFrameNumberForMove)
            {
                MoveImagedItemOrSensorHead();
            }

            // Check for missing images.
            if (frameNumber != nextExpectedFrameNumberImage)
            {
                logItems.Add( new LogItem( "An image has been lost. Expected frame number is " + nextExpectedFrameNumberExposureEnd + " but got frame number " + frameNumber, frameNumber ) );
                // Resync.
                nextExpectedFrameNumberImage = frameNumber;
            }

            nextExpectedFrameNumberImage = GetIncrementedFrameNumber( nextExpectedFrameNumberImage );
        }

        // This will print all the log items to console.
        private static void PrintLog()
        {
            Console.WriteLine( "Warning. The time values printed may not be correct on older computer hardware." );
            Console.WriteLine( "Time [ms]    Event                 Frame Number" );
            Console.WriteLine( "----------   ----------------      --------------" );

            var ticks = new List<double>();

            foreach (LogItem item in logItems)
            {
                ticks.Add( item.Time );
            }

            int i = 0;
            foreach (LogItem item in logItems)
            {
                double time_ms = 0;
                if (i > 0)
                {
                    double newTicks = ticks[i];
                    double oldTicks = ticks[i - 1];
                    time_ms = (((newTicks - oldTicks)) * 1000 / Stopwatch.Frequency);
                }

                i++;

                // {0,10:0.0000} Formatting the size of time_ms to 10 spaces and precision of 4.
                Console.WriteLine( String.Format( "{0,10:0.0000}", time_ms ) + " {0,18}       {1}", item.EventType, item.FrameNumber );
            }
        }

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;
            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );
                    if (camera.CameraInfo[CameraInfoKey.ModelName].StartsWith( "a2A" ))
                    {
                        Console.WriteLine( "Note: This sample may not work as expected when used with ace 2 cameras." );
                        Console.WriteLine( "      Please see note at the beginnging of the sample for details." );
                    }

                    // Configure the camera.
                    Configure( camera );

                    // Start grabbing of countOfImagesToGrab images.
                    // The camera device is operated in a default configuration that
                    // sets up free-running continuous acquisition.
                    camera.StreamGrabber.Start( countOfImagesToGrab );

                    IGrabResult result;
                    while (camera.StreamGrabber.IsGrabbing)
                    {
                        // Retrieve grab results and notify the camera event and image event handlers.
                        result = camera.StreamGrabber.RetrieveResult( 5000, TimeoutHandling.ThrowException );
                        using (result)
                        {
                            // Nothing to do here with the grab result. The grab results are handled by the registered event handlers.
                        }
                    }

                    // Disable events.
                    Disable( camera );

                    // Print the recorded log showing the timing of events and images.
                    PrintLog();

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
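
The GigE-specific wrap-around handling in GetIncrementedFrameNumber above is easy to check in isolation. The following standalone snippet (not part of the SDK sample) mirrors that logic and shows how the 16-bit counter skips 0 after wrapping:

using System;

class FrameCounterWrapDemo
{
    // Mirrors GetIncrementedFrameNumber from the sample above.
    static long Increment( long frameNr, bool gevCounterRules )
    {
        frameNr++;
        // GigE cameras use a 16-bit frame counter that skips 0 after wrapping.
        if (gevCounterRules && unchecked((UInt16)frameNr) == 0)
        {
            frameNr = 1;
        }
        return frameNr;
    }

    static void Main()
    {
        Console.WriteLine( Increment( 65534, true ) );  // 65535
        Console.WriteLine( Increment( 65535, true ) );  // 1 (0 is skipped)
        Console.WriteLine( Increment( 65535, false ) ); // 65536 (other counters simply keep counting)
    }
}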

Grab Using Grab Loop Thread#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample illustrates how to grab and process images using the grab loop thread
    provided by the Instant Camera class.
*/

using System;
using System.Threading;
using Basler.Pylon;

namespace Grab_UsingGrabLoopThread
{
    class Grab_UsingGrabLoopThread
    {
        // Example of an image event handler.
        static void OnImageGrabbed( Object sender, ImageGrabbedEventArgs e )
        {
            // The grab result is automatically disposed of when the event callback returns.
            // The grab result can be cloned using IGrabResult.Clone if you want to keep a copy of it
            // (not shown in this sample; see the sketch after this listing).
            IGrabResult grabResult = e.GrabResult;
            // Image grabbed successfully?
            if (grabResult.GrabSucceeded)
            {
                // Access the image data.
                Console.WriteLine( "SizeX: {0}", grabResult.Width );
                Console.WriteLine( "SizeY: {0}", grabResult.Height );
                byte[] buffer = grabResult.PixelData as byte[];
                Console.WriteLine( "Gray value of first pixel: {0}", buffer[0] );
                Console.WriteLine( "" );

                // Display the grabbed image.
                ImageWindow.DisplayImage( 0, grabResult );
                ImagePersistence.Save( ImageFileFormat.Bmp, "test.bmp", grabResult );
            }
            else
            {
                Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
            }
        }

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera( CameraSelectionStrategy.FirstFound ))
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Set the acquisition mode to software triggered continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.SoftwareTrigger;

                    //Open the connection to the camera device.
                    camera.Open();

                    // Check whether the camera supports waiting for frame trigger ready.
                    if (camera.CanWaitForFrameTriggerReady)
                    {

                        // Set a handler for processing the images.
                        camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;

                        // Start grabbing using the grab loop thread. This is done by setting the grabLoopType parameter
                        // to GrabLoop.ProvidedByStreamGrabber. The grab results are delivered to the image event handler OnImageGrabbed.
                        // The default grab strategy (GrabStrategy_OneByOne) is used.
                        camera.StreamGrabber.Start( GrabStrategy.OneByOne, GrabLoop.ProvidedByStreamGrabber );

                        // Wait for user input to trigger the camera or exit the loop.
                        // Software triggering is used to trigger the camera device.
                        Char key;
                        do
                        {
                            Console.WriteLine( "Press 't' to trigger the camera or 'e' to exit." );

                            key = Console.ReadKey( true ).KeyChar;
                            if ((key == 't' || key == 'T'))
                            {
                                // Execute the software trigger. Wait up to 1000 ms until the camera is ready for trigger.
                                if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling.ThrowException ))
                                {
                                    camera.ExecuteSoftwareTrigger();
                                }
                            }
                        }
                        while ((key != 'e') && (key != 'E'));

                        // Stop grabbing.
                        camera.StreamGrabber.Stop();
                    }
                    else
                    {
                        Console.WriteLine( "This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger." );
                    }

                    // Close the connection to the camera device.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
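
As noted in OnImageGrabbed above, the grab result is disposed of as soon as the handler returns. If the image data is needed afterwards, the result has to be cloned inside the handler. A minimal sketch, assuming IGrabResult.Clone returns an independent copy that the consumer must dispose of (the queue and handler names are illustrations only):

// Sketch only: keep copies of grab results for processing outside the event handler.
static readonly System.Collections.Concurrent.ConcurrentQueue<IGrabResult> pendingResults =
    new System.Collections.Concurrent.ConcurrentQueue<IGrabResult>();

static void OnImageGrabbedKeepCopy( Object sender, ImageGrabbedEventArgs e )
{
    if (e.GrabResult.GrabSucceeded)
    {
        // Clone the grab result so it can be used after the handler returns.
        // Whoever dequeues the copy later is responsible for disposing of it.
        pendingResults.Enqueue( e.GrabResult.Clone() );
    }
}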

Grab Using Sequencer#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample shows how to grab images using the sequencer feature of a camera.
    Three sequence sets are used for image acquisition. Each sequence set
    uses a different image height.
*/

using System;
using Basler.Pylon;

namespace Grab_UsingSequencer
{
    class Grab_UsingSequencer
    {
        private static Version sfnc2_0_0 = new Version(2, 0, 0);
        // Number of images to be grabbed.
        private static UInt32 countOfImagesToGrab = 10;

        // OnImageGrabbed is used to print image information such as the width and height.
        // It can be extended to implement other functionality for the image grabbed event.
        private static void OnImageGrabbed( Object sender, ImageGrabbedEventArgs e )
        {
            if (e.GrabResult.GrabSucceeded)
            {
                Console.WriteLine( "SizeX:{0}", e.GrabResult.Width );
                Console.WriteLine( "SizeY:{0}", e.GrabResult.Height );
                byte[] pixelData = (byte[])e.GrabResult.PixelData;
                Console.WriteLine( "Gray value of first pixel:{0}", pixelData[0] );
            }
            else
            {
                Console.WriteLine( "Error Code: {0} Error Description: {1}", e.GrabResult.ErrorCode, e.GrabResult.ErrorDescription );
            }
        }


        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object selecting the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Change default configuration to enable software triggering.
                    camera.CameraOpened += Configuration.SoftwareTrigger;

                    // Open the camera.
                    camera.Open();

                    // Register image grabbed event to print frame info
                    camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;

                    // DeviceVendorName, DeviceModelName, and DeviceFirmwareVersion are string parameters.
                    Console.WriteLine( "Camera Device Information" );
                    Console.WriteLine( "=========================" );
                    Console.WriteLine( "Vendor           : {0}", camera.Parameters[PLCamera.DeviceVendorName].GetValue() );
                    Console.WriteLine( "Model            : {0}", camera.Parameters[PLCamera.DeviceModelName].GetValue() );
                    Console.WriteLine( "Firmware version : {0}", camera.Parameters[PLCamera.DeviceFirmwareVersion].GetValue() );
                    Console.WriteLine( "" );
                    Console.WriteLine( "Camera Device Settings" );
                    Console.WriteLine( "======================" );

                    // Can the camera device be queried whether it is ready to accept the next frame trigger?
                    if (camera.CanWaitForFrameTriggerReady)
                    {
                        // Indicates whether the sequencer feature is available.
                        bool sequencerAvailable = false;

                        if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                        {
                            if (camera.Parameters[PLCamera.SequenceEnable].IsWritable)
                            {
                                sequencerAvailable = true; // The sequencer feature is available.

                                // Disable the sequencer before changing parameters.
                                // The parameters under control of the sequencer are locked
                                // when the sequencer is enabled. For a list of parameters
                                // controlled by the sequencer, see the Basler Product Documentation or the camera user's manual.
                                camera.Parameters[PLCamera.SequenceEnable].SetValue( false );
                                camera.Parameters[PLCamera.SequenceConfigurationMode].TrySetValue(PLCamera.SequenceConfigurationMode.Off);

                                // Maximize the image area of interest (Image AOI).
                                camera.Parameters[PLCamera.OffsetX].TrySetValue( camera.Parameters[PLCamera.OffsetX].GetMinimum() );
                                camera.Parameters[PLCamera.OffsetY].TrySetValue( camera.Parameters[PLCamera.OffsetY].GetMinimum() );
                                camera.Parameters[PLCamera.Width].SetValue( camera.Parameters[PLCamera.Width].GetMaximum() );
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetMaximum() );

                                // Set the pixel data format.
                                // This parameter may be locked when the sequencer is enabled.
                                camera.Parameters[PLCamera.PixelFormat].SetValue( PLCamera.PixelFormat.Mono8 );


                                // Set up sequence sets.

                                // Turn configuration mode on
                                camera.Parameters[PLCamera.SequenceConfigurationMode].TrySetValue(PLCamera.SequenceConfigurationMode.On);

                                // Configure how the sequence will advance.
                                // 'Auto' refers to the auto sequence advance mode.
                                // The advance from one sequence set to the next will occur automatically with each image acquired.
                                // After the end of the sequence set cycle has been reached, a new cycle starts.
                                camera.Parameters[PLCamera.SequenceAdvanceMode].SetValue( PLCamera.SequenceAdvanceMode.Auto );

                                // Our sequence sets relate to three steps (0..2).
                                // In each step we will increase the height of the Image AOI by one increment.
                                camera.Parameters[PLCamera.SequenceSetTotalNumber].SetValue( 3 );

                                long increments = (camera.Parameters[PLCamera.Height].GetMaximum() - camera.Parameters[PLCamera.Height].GetMinimum()) / camera.Parameters[PLCamera.Height].GetIncrement();

                                // Set the parameters for step 0; quarter height image.
                                camera.Parameters[PLCamera.SequenceSetIndex].SetValue( 0 );
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 4) + camera.Parameters[PLCamera.Height].GetMinimum() );
                                camera.Parameters[PLCamera.SequenceSetStore].Execute();

                                // Set the parameters for step 1; half height image.
                                camera.Parameters[PLCamera.SequenceSetIndex].SetValue( 1 );
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 2) + camera.Parameters[PLCamera.Height].GetMinimum() );
                                camera.Parameters[PLCamera.SequenceSetStore].Execute();

                                // Set the parameters for step 2; full height image.
                                camera.Parameters[PLCamera.SequenceSetIndex].SetValue( 2 );
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetIncrement() * (increments) + camera.Parameters[PLCamera.Height].GetMinimum() );
                                camera.Parameters[PLCamera.SequenceSetStore].Execute();

                                // Finish configuration
                                if (camera.Parameters[PLCamera.SequenceConfigurationMode].IsWritable)
                                {
                                    camera.Parameters[PLCamera.SequenceConfigurationMode].SetValue( PLCamera.SequenceConfigurationMode.Off );
                                }

                                // Enable the sequencer feature.
                                // From here on you cannot change the sequencer settings anymore.
                                camera.Parameters[PLCamera.SequenceEnable].SetValue( true );

                                // Start the grabbing of countOfImagesToGrab images.
                                camera.StreamGrabber.Start( countOfImagesToGrab );
                            }
                            else
                            {
                                sequencerAvailable = false; // Sequencer not available
                            }
                        }
                        else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                        {
                            if (camera.Parameters[PLCamera.SequencerMode].IsWritable)
                            {
                                sequencerAvailable = true;

                                // Disable the sequencer before changing parameters.
                                // The parameters under control of the sequencer are locked
                                // when the sequencer is enabled. For a list of parameters
                                // controlled by the sequencer, see the Basler Product Documentation or the camera user's manual.
                                camera.Parameters[PLCamera.SequencerMode].SetValue( PLCamera.SequencerMode.Off );
                                camera.Parameters[PLCamera.SequenceConfigurationMode].TrySetValue(PLCamera.SequenceConfigurationMode.Off);

                                // Maximize the image area of interest (Image AOI).
                                camera.Parameters[PLCamera.OffsetX].TrySetValue( camera.Parameters[PLCamera.OffsetX].GetMinimum() );
                                camera.Parameters[PLCamera.OffsetY].TrySetValue( camera.Parameters[PLCamera.OffsetY].GetMinimum() );
                                camera.Parameters[PLCamera.Width].SetValue( camera.Parameters[PLCamera.Width].GetMaximum() );
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetMaximum() );

                                // Set the pixel data format.
                                // This parameter may be locked when the sequencer is enabled.
                                camera.Parameters[PLCamera.PixelFormat].SetValue( PLCamera.PixelFormat.Mono8 );

                                // Turn configuration mode on if available.
                                // Not supported by all cameras.
                                camera.Parameters[PLCamera.SequencerConfigurationMode].TrySetValue( PLCamera.SequencerConfigurationMode.On );

                                // Configure how the sequence will advance.

                                // The sequence sets relate to three steps (0..2).
                                // In each step, the height of the Image AOI is doubled.

                                long increments = (camera.Parameters[PLCamera.Height].GetMaximum() - camera.Parameters[PLCamera.Height].GetMinimum()) / camera.Parameters[PLCamera.Height].GetIncrement();

                                long initialSet = camera.Parameters[PLCamera.SequencerSetSelector].GetMinimum();
                                long incSet = camera.Parameters[PLCamera.SequencerSetSelector].GetIncrement();
                                long curSet = initialSet;

                                // Set the parameters for step 0; quarter height image.
                                camera.Parameters[PLCamera.SequencerSetSelector].SetValue( initialSet );
                                {
                                    // Valid for all sets:
                                    // Reset to the initial set on software signal 1 (a sketch of issuing this signal follows the listing).
                                    camera.Parameters[PLCamera.SequencerPathSelector].SetValue( 0 );
                                    camera.Parameters[PLCamera.SequencerSetNext].SetValue( initialSet );
                                    camera.Parameters[PLCamera.SequencerTriggerSource].SetValue( PLCamera.SequencerTriggerSource.SoftwareSignal1 );
                                    // Advance on Frame Start or Exposure Start (depends on the camera family).
                                    camera.Parameters[PLCamera.SequencerPathSelector].SetValue( 1 );
                                    camera.Parameters[PLCamera.SequencerTriggerSource].SetValue( new[] { "FrameStart", "ExposureStart" } );
                                }
                                camera.Parameters[PLCamera.SequencerSetNext].SetValue( curSet + incSet );

                                // Set the parameters for step 0; quarter height image.
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 4) + camera.Parameters[PLCamera.Height].GetMinimum() );
                                camera.Parameters[PLCamera.SequencerSetSave].Execute();

                                // Set the parameters for step 1; half height image.
                                curSet += incSet;
                                camera.Parameters[PLCamera.SequencerSetSelector].SetValue( curSet );
                                // advance on Frame Start to next set
                                camera.Parameters[PLCamera.SequencerSetNext].SetValue( curSet + incSet );
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 2) + camera.Parameters[PLCamera.Height].GetMinimum() );
                                camera.Parameters[PLCamera.SequencerSetSave].Execute();

                                // Set the parameters for step 2; full height image.
                                curSet += incSet;
                                camera.Parameters[PLCamera.SequencerSetSelector].SetValue( curSet );
                                // Advance on Frame End to the initial set; this terminates the sequence definition.
                                camera.Parameters[PLCamera.SequencerSetNext].SetValue( initialSet );
                                // Full height.
                                camera.Parameters[PLCamera.Height].SetValue( camera.Parameters[PLCamera.Height].GetIncrement() * increments + camera.Parameters[PLCamera.Height].GetMinimum() );
                                camera.Parameters[PLCamera.SequencerSetSave].Execute();

                                // Enable the sequencer feature.
                                // From here on you cannot change the sequencer settings anymore.
                                camera.Parameters[PLCamera.SequencerConfigurationMode].SetValue( PLCamera.SequencerConfigurationMode.Off );
                                camera.Parameters[PLCamera.SequencerMode].SetValue( PLCamera.SequencerMode.On );

                                // Start the grabbing of countOfImagesToGrab images.
                                camera.StreamGrabber.Start( countOfImagesToGrab );
                            }
                            else
                            {
                                sequencerAvailable = false; // Sequencer not available
                            }
                        }

                        if (sequencerAvailable)
                        {
                            IGrabResult result;
                            // Grabbing stops automatically after countOfImagesToGrab images
                            // have been retrieved with RetrieveResult().
                            while (camera.StreamGrabber.IsGrabbing)
                            {
                                // Execute the software trigger. Wait up to 1000 ms for the camera to be ready for trigger.
                                if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling.ThrowException ))
                                {
                                    camera.ExecuteSoftwareTrigger();

                                    // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                                    result = camera.StreamGrabber.RetrieveResult( 5000, TimeoutHandling.ThrowException );

                                    using (result)
                                    {
                                        // Image grabbed successfully?
                                        if (result.GrabSucceeded)
                                        {
                                            // Display the grabbed image.
                                            ImageWindow.DisplayImage( 1, result );
                                        }
                                        else
                                        {
                                            Console.WriteLine( "Error code:{0} Error description:{1}", result.ErrorCode, result.ErrorDescription );
                                        }
                                    }
                                }

                                // Wait for user input.
                                Console.WriteLine( "Press enter to continue." );
                                Console.ReadLine();
                            }

                            // Disable the sequencer.
                            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                            {
                                camera.Parameters[PLCamera.SequenceEnable].SetValue( false );
                            }
                            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                            {
                                camera.Parameters[PLCamera.SequencerMode].SetValue( PLCamera.SequencerMode.Off );
                            }
                            camera.Parameters[PLCamera.SequenceConfigurationMode].TrySetValue(PLCamera.SequenceConfigurationMode.Off);
                        }
                        else
                        {
                            Console.WriteLine( "The sequencer feature is not available for this camera." );
                        }
                    }
                    else
                    {
                        Console.WriteLine( "This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger." );
                    }

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine( "\nPress enter to exit." );
            Console.ReadLine();

            Environment.Exit( exitCode );
        }
    }
}
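
The first sequencer path in the SFNC 2.0 branch above is configured to reset to the initial set on software signal 1, but the listing never issues that signal. A sketch of issuing it is shown below; note that the SoftwareSignalSelector and SoftwareSignalPulse parameter names are assumptions based on the SFNC and are not taken from the listing, so they may not be available on every camera model:

// Hypothetical sketch: reset the sequencer to its initial set by pulsing software signal 1.
// SoftwareSignalSelector and SoftwareSignalPulse are assumed SFNC feature names; check
// whether they are available and writable on your camera model before relying on them.
if (camera.Parameters[PLCamera.SoftwareSignalSelector].TrySetValue( PLCamera.SoftwareSignalSelector.SoftwareSignal1 ))
{
    camera.Parameters[PLCamera.SoftwareSignalPulse].Execute();
}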

Parameterize Camera#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    For camera configuration and for accessing other parameters, the pylon API
    uses the technologies defined by the GenICam standard hosted by the
    European Machine Vision Association (EMVA). The GenICam specification
    (http://www.GenICam.org) defines a format for camera description files.
    These files describe the configuration interface of GenICam compliant cameras.
    The description files are written in XML (eXtensible Markup Language) and
    describe camera registers, their interdependencies, and all other
    information needed to access high-level features. This includes features such as Gain,
    Exposure Time, or Pixel Format. The features are accessed by means of low level
    register read and write operations.
    The elements of a camera description file are represented as parameter objects.
    For example, a parameter object can represent a single camera
    register, a camera parameter such as Gain, or a set of available parameter
    values.
*/

using System;
using Basler.Pylon;

namespace ParameterizeCamera
{
    class ParameterizeCamera
    {
        static Version sfnc2_0_0 = new Version(2, 0, 0);

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    /*************************************************************************
                     * Accessing camera parameters                                           *
                     *************************************************************************/

                    // Before accessing camera device parameters, the camera must be opened.
                    camera.Open();

                    // Parameters are accessed using parameter lists. Parameter lists contain a set of parameter names
                    // analogous to enumerations of a programming language. Here, the parameter list PLCamera is used.
                    // PLCamera contains a list of parameter names of all camera device types. Additional device-specific
                    // parameter lists are available, e.g. PLUsbCamera for USB camera devices.


                    // DeviceVendorName, DeviceModelName, and DeviceFirmwareVersion are string parameters.
                    Console.WriteLine( "Camera Device Information" );
                    Console.WriteLine( "=========================" );
                    Console.WriteLine( "Vendor           : {0}", camera.Parameters[PLCamera.DeviceVendorName].GetValue() );
                    Console.WriteLine( "Model            : {0}", camera.Parameters[PLCamera.DeviceModelName].GetValue() );
                    Console.WriteLine( "Firmware version : {0}", camera.Parameters[PLCamera.DeviceFirmwareVersion].GetValue() );
                    Console.WriteLine( "" );
                    Console.WriteLine( "Camera Device Settings" );
                    Console.WriteLine( "======================" );


                    // Setting the AOI. OffsetX, OffsetY, Width, and Height are integer parameters.
                    // On some cameras, the offsets are read-only. If they are writable, set the offsets to min.
                    if(camera.Parameters[PLCamera.OffsetX].TrySetToMinimum())
                    {
                        Console.WriteLine( "OffsetX          : {0}", camera.Parameters[PLCamera.OffsetX].GetValue() );
                    }
                    if(camera.Parameters[PLCamera.OffsetY].TrySetToMinimum())
                    {
                        Console.WriteLine( "OffsetY          : {0}", camera.Parameters[PLCamera.OffsetY].GetValue() );
                    }

                    // Some parameters have restrictions. You can use GetIncrement/GetMinimum/GetMaximum to make sure you set a valid value.
                    // Here, we let pylon correct the value if needed.
                    camera.Parameters[PLCamera.Width].SetValue( 202, IntegerValueCorrection.Nearest );
                    camera.Parameters[PLCamera.Height].SetValue( 101, IntegerValueCorrection.Nearest );
                    Console.WriteLine( "Width            : {0}", camera.Parameters[PLCamera.Width].GetValue() );
                    Console.WriteLine( "Height           : {0}", camera.Parameters[PLCamera.Height].GetValue() );


                    // Set an enum parameter.
                    string oldPixelFormat = camera.Parameters[PLCamera.PixelFormat].GetValue(); // Remember the current pixel format.
                    Console.WriteLine( "Old PixelFormat  : {0} ({1})", camera.Parameters[PLCamera.PixelFormat].GetValue(), oldPixelFormat );

                    // Set pixel format to Mono8 if available.
                    if (camera.Parameters[PLCamera.PixelFormat].TrySetValue( PLCamera.PixelFormat.Mono8 ))
                    {
                        Console.WriteLine( "New PixelFormat  : {0} ({1})", camera.Parameters[PLCamera.PixelFormat].GetValue(), oldPixelFormat );
                    }


                    // Some camera models may have auto functions enabled. To set the gain value to a specific value,
                    // the Gain Auto function must be disabled first (if gain auto is available).
                    camera.Parameters[PLCamera.GainAuto].TrySetValue( PLCamera.GainAuto.Off ); // Set GainAuto to Off if it is writable.

                    // Features, e.g. 'Gain', are named according to the GenICam Standard Feature Naming Convention (SFNC).
                    // The SFNC defines a common set of features, their behavior, and the related parameter names.
                    // This ensures the interoperability of cameras from different camera vendors.
                    // Current Basler USB ace and all Basler ace 2 camera models are based on SFNC version 2.0.
                    // Older Basler GigE cameras are based on previous SFNC versions.
                    // Accordingly, the behavior of these cameras and some parameter names will be different.
                    // The SFNC version can be used to handle differences between camera device models.
                    if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                    {
                        // In previous SFNC versions, GainRaw is an integer parameter.
                        camera.Parameters[PLCamera.GainRaw].TrySetValuePercentOfRange( 50 );
                        // GammaEnable is a boolean parameter.
                        camera.Parameters[PLCamera.GammaEnable].TrySetValue( true );
                    }
                    else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                    {
                        // In SFNC 2.0, Gain is a float parameter.
                        camera.Parameters[PLUsbCamera.Gain].TrySetValuePercentOfRange( 50 );
                        // For USB cameras, Gamma is always enabled.
                    }


                    /*************************************************************************
                     * Parameter access status                                               *
                     *************************************************************************/

                    // Each parameter is either readable or writable or both.
                    // Depending on the camera's state, a parameter may temporarily not be readable or writable.
                    // For example, a parameter related to external triggering may not be available when the camera is in free run mode.
                    // Additionally, parameters can be read-only by default.
                    Console.WriteLine( "OffsetX readable        : {0}", camera.Parameters[PLCamera.OffsetX].IsReadable );
                    Console.WriteLine( "TriggerSoftware writable: {0}", camera.Parameters[PLCamera.TriggerSoftware].IsWritable );


                    /*************************************************************************
                     * Empty parameters                                                      *
                     *************************************************************************/

                    // Camera models have different parameter sets available. For example, GammaEnable is not part of USB camera device
                    // parameters. If a requested parameter does not exist, an empty parameter object will be returned to simplify handling.
                    // Therefore, an additional existence check is not necessary.
                    // An empty parameter is never readable or writable.
                    Console.WriteLine( "GammaEnable writable    : {0}", camera.Parameters[PLCamera.GammaEnable].IsWritable );
                    Console.WriteLine( "GammaEnable readable    : {0}", camera.Parameters[PLCamera.GammaEnable].IsReadable );
                    Console.WriteLine( "GammaEnable empty       : {0}", camera.Parameters[PLCamera.GammaEnable].IsEmpty );


                    /*************************************************************************
                     * Try or GetValueOrDefault methods                                      *
                     *************************************************************************/

                    // Several parameters provide Try or GetValueOrDefault methods. These methods are provided because
                    // a parameter may not always be available, either because the camera device model does not support the parameter
                    // or because the parameter is temporarily disabled (due to other parameter settings).
                    camera.Parameters[PLCamera.GammaEnable].TrySetValue( true ); // If the GammaEnable parameter is writable, enable it.

                    // Toggle CenterX to change the availability of OffsetX.
                    // If CenterX is readable, get the value. Otherwise, return false.
                    bool centerXValue = camera.Parameters[PLCamera.CenterX].GetValueOrDefault(false);
                    Console.WriteLine( "CenterX                 : {0}", centerXValue );
                    Console.WriteLine( "OffsetX writable        : {0}", camera.Parameters[PLCamera.OffsetX].IsWritable );
                    camera.Parameters[PLCamera.CenterX].TrySetValue( !centerXValue ); // Toggle CenterX if CenterX is writable.
                    Console.WriteLine( "CenterX                 : {0}", camera.Parameters[PLCamera.CenterX].GetValueOrDefault( false ) );
                    Console.WriteLine( "OffsetX writable        : {0}", camera.Parameters[PLCamera.OffsetX].IsWritable );
                    camera.Parameters[PLCamera.CenterX].TrySetValue( centerXValue ); // Restore the value of CenterX if CenterX is writable.

                    // Important: The Try and the GetValueOrDefault methods are usually related to the access status (IsWritable or IsReadable) of a parameter.
                    // For more information, check the summary of the methods.
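                    // As an illustration, a Try method behaves roughly like checking the access status first.
                    // The following lines sketch the hand-written counterpart of TrySetValue, using
                    // GammaEnable as an example (a sketch, not the actual library implementation).
                    if (camera.Parameters[PLCamera.GammaEnable].IsWritable)
                    {
                        camera.Parameters[PLCamera.GammaEnable].SetValue( true );
                    }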

                    // There are additional methods available that provide support for setting valid values.
                    // Set the width and correct the value to the nearest valid increment.
                    camera.Parameters[PLCamera.Width].SetValue( 202, IntegerValueCorrection.Nearest );
                    // Set the width and correct the value to the nearest valid increment if the width parameter is readable and writable.
                    camera.Parameters[PLCamera.Width].TrySetValue( 202, IntegerValueCorrection.Nearest );
                    // One of the following pixel formats should be available:
                    string[] pixelFormats = new string[]
                    {
                        PLCamera.PixelFormat.BayerBG8,
                        PLCamera.PixelFormat.BayerRG8,
                        PLCamera.PixelFormat.BayerGR8,
                        PLCamera.PixelFormat.BayerGB8,
                        PLCamera.PixelFormat.Mono8
                    };
                    camera.Parameters[PLCamera.PixelFormat].SetValue( pixelFormats ); //Set the first valid pixel format in the list.
                    camera.Parameters[PLCamera.PixelFormat].TrySetValue( pixelFormats ); //Set the first valid pixel format in the list if PixelFormat is writable.
                    Console.WriteLine( "New PixelFormat  : {0}", camera.Parameters[PLCamera.PixelFormat].GetValue() );


                    /*************************************************************************
                     * Optional: Accessing camera parameters without using a parameter list  *
                     *************************************************************************/

                    // Accessing parameters without using a parameter list can be necessary in rare cases,
                    // e.g. if you want to set newly added camera parameters that are not added to a parameter list yet.
                    // It is recommended to use parameter lists if possible to avoid using the wrong parameter type and
                    // to avoid spelling errors.

                    // When accessing parameters, the name and the type must usually be known beforehand.
                    // The following syntax can be used to access any camera device parameter.
                    // Adjust the parameter name ("BrandNewFeature") and the parameter type (IntegerName, EnumName, FloatName, etc.)
                    // according to the parameter that you want to access.
                    camera.Parameters[(IntegerName)"BrandNewFeature"].TrySetToMaximum(); // TrySetToMaximum is called for demonstration purposes only.
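                    // Note: if "BrandNewFeature" does not exist on the current camera device, an empty
                    // parameter object is returned (see the "Empty parameters" section above), and
                    // TrySetToMaximum simply returns false, so this call is safe on any device.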

                    // This is another way to access a parameter without using a parameter list.
                    // It is shown for completeness only.
                    IIntegerParameter brandNewFeature = camera.Parameters["BrandNewFeature"] as IIntegerParameter;
                    // brandNewFeature will be null if the parameter is not present, because it cannot be cast to IIntegerParameter.
                    if (brandNewFeature != null)
                    {
                        brandNewFeature.TrySetToMaximum();
                    }

                    // Enumeration values are plain strings.
                    // Similar to the example above, the pixel format is set to Mono8, this time without using a parameter list.
                    if (camera.Parameters[(EnumName)"PixelFormat"].TrySetValue( "Mono8" ))
                    {
                        Console.WriteLine( "New PixelFormat  : {0}", camera.Parameters[(EnumName)"PixelFormat"].GetValue() );
                    }

                    // Restore the old pixel format.
                    camera.Parameters[PLCamera.PixelFormat].SetValue( oldPixelFormat );

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine( "\nPress enter to exit." );
            Console.ReadLine();

            Environment.Exit( exitCode );
        }
    }
}

VB

'For camera configuration and for accessing other parameters, the pylon API
'uses the technologies defined by the GenICam standard hosted by the
'European Machine Vision Association (EMVA). The GenICam specification
'(http://www.GenICam.org) defines a format for camera description files.
'These files describe the configuration interface of GenICam compliant cameras.
'The description files are written in XML (eXtensible Markup Language) and
'describe camera registers, their interdependencies, and all other
'information needed to access high-level features. This includes features such as Gain,
'Exposure Time, or Pixel Format. The features are accessed by means of low level
'register read and write operations.

'The elements of a camera description file are represented as parameter objects.
'For example, a parameter object can represent a single camera
'register, a camera parameter such as Gain, or a set of available parameter
'values.

Imports Basler.Pylon

Public Class ParameterizeCamera

    Shared Sfnc2_0_0 As New Version(2, 0, 0)

    Shared Sub Main()
        ' The exit code of the sample application.
        Dim exitCode As Integer = 0

        Try
            ' Create a camera object that selects the first camera device found.
            ' More constructors are available for selecting a specific camera device.
            Using camera As New Camera()
                '************************************************************************
                '* Accessing camera parameters                                          *
                '************************************************************************


                ' Before accessing camera device parameters, the camera must be opened.
                camera.Open()

                ' Parameters are accessed using parameter lists. Parameter lists contain a set of parameter names
                ' analogous to enumerations of a programming language. Here the parameter list PLCamera is used.
                ' PLCamera contains a list of parameter names of all camera device types. Additional device-specific
                ' parameter lists are available, e.g. PLUsbCamera for USB camera devices.


                ' DeviceVendorName, DeviceModelName, and DeviceFirmwareVersion are string parameters.
                Console.WriteLine("Camera Device Information")
                Console.WriteLine("=========================")
                Console.WriteLine("Vendor           : {0}", camera.Parameters(PLCamera.DeviceVendorName).GetValue())
                Console.WriteLine("Model            : {0}", camera.Parameters(PLCamera.DeviceModelName).GetValue())
                Console.WriteLine("Firmware version : {0}", camera.Parameters(PLCamera.DeviceFirmwareVersion).GetValue())
                Console.WriteLine("")
                Console.WriteLine("Camera Device Settings")
                Console.WriteLine("======================")


                ' Setting the AOI. OffsetX, OffsetY, Width, and Height are integer parameters.
                ' On some cameras, the offsets are read-only. If they are writable, set the offsets to min.
                camera.Parameters(PLCamera.OffsetX).TrySetToMinimum()
                camera.Parameters(PLCamera.OffsetY).TrySetToMinimum()
                ' Some parameters have restrictions. You can use GetIncrement/GetMinimum/GetMaximum to make sure you set a valid value.
                ' Here, we let pylon correct the value if needed.
                camera.Parameters(PLCamera.Width).SetValue(202, IntegerValueCorrection.Nearest)
                camera.Parameters(PLCamera.Height).SetValue(101, IntegerValueCorrection.Nearest)
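                ' Illustration: instead of letting pylon correct the value, you could also query the
                ' valid range and increment yourself and compute a valid value before calling SetValue.
                ' The target value of 202 below is just an example.
                Dim widthMin As Long = camera.Parameters(PLCamera.Width).GetMinimum()
                Dim widthMax As Long = camera.Parameters(PLCamera.Width).GetMaximum()
                Dim widthInc As Long = camera.Parameters(PLCamera.Width).GetIncrement()
                Dim desiredWidth As Long = Math.Min(Math.Max(202, widthMin), widthMax)
                Dim validWidth As Long = widthMin + ((desiredWidth - widthMin) \ widthInc) * widthInc ' Round down to a valid increment step.
                camera.Parameters(PLCamera.Width).SetValue(validWidth)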

                If camera.Parameters(PLCamera.OffsetX).IsReadable Then
                    Console.WriteLine("OffsetX          : {0}", camera.Parameters(PLCamera.OffsetX).GetValue())
                End If
                If camera.Parameters(PLCamera.OffsetY).IsReadable Then
                    Console.WriteLine("OffsetY          : {0}", camera.Parameters(PLCamera.OffsetY).GetValue())
                End If

                Console.WriteLine("Width            : {0}", camera.Parameters(PLCamera.Width).GetValue())
                Console.WriteLine("Height           : {0}", camera.Parameters(PLCamera.Height).GetValue())


                ' Set an enum parameter.
                Dim oldPixelFormat As String = camera.Parameters(PLCamera.PixelFormat).GetValue()
                ' Remember the current pixel format.
                Console.WriteLine("Old PixelFormat  : {0} ({1})", camera.Parameters(PLCamera.PixelFormat).GetValue(), oldPixelFormat)

                ' Set pixel format to Mono8 if available.
                If camera.Parameters(PLCamera.PixelFormat).TrySetValue(PLCamera.PixelFormat.Mono8) Then
                    Console.WriteLine("New PixelFormat  : {0} ({1})", camera.Parameters(PLCamera.PixelFormat).GetValue(), oldPixelFormat)
                End If

                ' Some camera models may have auto functions enabled. To set the gain value to a specific value,
                ' the Gain Auto function must be disabled first (if gain auto is available).
                camera.Parameters(PLCamera.GainAuto).TrySetValue(PLCamera.GainAuto.Off) ' Set GainAuto to Off if it is writable.


                ' Features, e.g. 'Gain', are named according to the GenICam Standard Feature Naming Convention (SFNC).
                ' The SFNC defines a common set of features, their behavior, and the related parameter names.
                ' This ensures the interoperability of cameras from different camera vendors.
                ' Cameras compliant with the USB3 Vision standard are based on SFNC version 2.0.
                ' Current Basler USB ace and all Basler ace 2 camera models are based on SFNC version 2.0.
                ' Older Basler GigE cameras are based on previous SFNC versions.
                ' The SFNC version can be used to handle differences between camera device models.
                If camera.GetSfncVersion() < Sfnc2_0_0 Then
                    ' In previous SFNC versions, GainRaw is an integer parameter.
                    camera.Parameters(PLCamera.GainRaw).TrySetValuePercentOfRange(50)
                    ' GammaEnable is a boolean parameter.
                    camera.Parameters(PLCamera.GammaEnable).TrySetValue(True)
                Else
                    ' For SFNC 2.0 cameras, e.g. USB3 Vision cameras
                    ' In SFNC 2.0, Gain is a float parameter.
                    ' For USB cameras, Gamma is always enabled.
                    camera.Parameters(PLUsbCamera.Gain).TrySetValuePercentOfRange(50)
                End If


                '************************************************************************
                '* Parameter access status                                              *
                '************************************************************************


                ' Each parameter is either readable or writable or both.
                ' Depending on the camera's state, a parameter may temporarily not be readable or writable.
                ' For example, a parameter related to external triggering may not be available when the camera is in free run mode.
                ' Additionally, parameters can be read-only by default.
                Console.WriteLine("OffsetX readable        : {0}", camera.Parameters(PLCamera.OffsetX).IsReadable)
                Console.WriteLine("TriggerSoftware writable: {0}", camera.Parameters(PLCamera.TriggerSoftware).IsWritable)


                '************************************************************************
                '* Empty parameters                                                     *
                '************************************************************************


                ' Camera models have different parameter sets available. For example, GammaEnable is not part of USB camera device
                ' parameters. If a requested parameter does not exist, an empty parameter object will be returned to simplify handling.
                ' Therefore, an additional existence check is not necessary.
                ' An empty parameter is never readable or writable.
                Console.WriteLine("GammaEnable writable    : {0}", camera.Parameters(PLCamera.GammaEnable).IsWritable)
                Console.WriteLine("GammaEnable readable    : {0}", camera.Parameters(PLCamera.GammaEnable).IsReadable)
                Console.WriteLine("GammaEnable empty       : {0}", camera.Parameters(PLCamera.GammaEnable).IsEmpty)


                '************************************************************************
                '* Try or GetValueOrDefault methods                                     *
                '************************************************************************


                ' Several parameters provide Try or GetValueOrDefault methods. These methods are provided because
                ' a parameter may not always be available, either because the camera device model does not support the parameter
                ' or because the parameter is temporarily disabled (due to other parameter settings).
                ' If the GammaEnable parameter is writable, enable it.
                camera.Parameters(PLCamera.GammaEnable).TrySetValue(True)
                ' Toggle CenterX to change the availability of OffsetX.
                ' If CenterX is readable, get the value. Otherwise, return false.
                Dim centerXValue As Boolean = camera.Parameters(PLCamera.CenterX).GetValueOrDefault(False)
                Console.WriteLine("CenterX                 : {0}", centerXValue)
                Console.WriteLine("OffsetX writable        : {0}", camera.Parameters(PLCamera.OffsetX).IsWritable)
                ' Toggle CenterX if CenterX is writable.
                camera.Parameters(PLCamera.CenterX).TrySetValue(Not centerXValue)
                Console.WriteLine("CenterX                 : {0}", camera.Parameters(PLCamera.CenterX).GetValueOrDefault(False))
                Console.WriteLine("OffsetX writable        : {0}", camera.Parameters(PLCamera.OffsetX).IsWritable)
                ' Restore the value of CenterX if CenterX is writable.
                camera.Parameters(PLCamera.CenterX).TrySetValue(centerXValue)
                ' Important: The Try and the GetValueOrDefault methods are usually related to the access status (IsWritable or IsReadable) of a parameter.
                ' For more information, check the summary of the methods.
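                ' As an illustration, a Try method behaves roughly like checking the access status first.
                ' The following lines sketch the hand-written counterpart of TrySetValue, using
                ' GammaEnable as an example (a sketch, not the actual library implementation).
                If camera.Parameters(PLCamera.GammaEnable).IsWritable Then
                    camera.Parameters(PLCamera.GammaEnable).SetValue(True)
                End If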

                ' There are additional methods available that provide support for setting valid values.
                ' Set the width and correct the value to the nearest valid increment.
                camera.Parameters(PLCamera.Width).SetValue(202, IntegerValueCorrection.Nearest)
                ' Set the width and correct the value to the nearest valid increment if the width parameter is readable and writable.
                camera.Parameters(PLCamera.Width).TrySetValue(202, IntegerValueCorrection.Nearest)
                ' One of the following pixel formats should be available:
                Dim pixelFormats As String() = New String() {PLCamera.PixelFormat.BayerBG8, PLCamera.PixelFormat.BayerRG8, PLCamera.PixelFormat.BayerGR8, PLCamera.PixelFormat.BayerGB8, PLCamera.PixelFormat.Mono8}
                'Set the first valid pixel format in the list.
                camera.Parameters(PLCamera.PixelFormat).SetValue(pixelFormats)
                'Set the first valid pixel format in the list if PixelFormat is writable.
                camera.Parameters(PLCamera.PixelFormat).TrySetValue(pixelFormats)
                Console.WriteLine("New PixelFormat  : {0}", camera.Parameters(PLCamera.PixelFormat).GetValue())


                '************************************************************************
                '* Optional: Accessing camera parameters without using a parameter list *
                '************************************************************************


                ' Accessing parameters without using a parameter list can be necessary in rare cases,
                ' e.g. if you want to set newly added camera parameters that are not added to a parameter list yet.
                ' It is recommended to use parameter lists if possible to avoid using the wrong parameter type and
                ' to avoid spelling errors.

                ' When accessing parameters, the name and the type must usually be known beforehand.
                ' The following syntax can be used to access any camera device parameter.
                ' Adjust the parameter name ("BrandNewFeature") and the parameter type (IntegerName, EnumName, FloatName, etc.)
                ' according to the parameter that you want to access.
                camera.Parameters(CType("BrandNewFeature", IntegerName)).TrySetToMaximum()

                ' This is another way to access a parameter without using a parameter list.
                ' It is shown for completeness only.
                Dim brandNewFeature As IIntegerParameter = TryCast(camera.Parameters("BrandNewFeature"), IIntegerParameter)
                ' brandNewFeature will be Nothing if the parameter is not present, because it cannot be cast to IIntegerParameter.
                If brandNewFeature IsNot Nothing Then
                    brandNewFeature.TrySetToMaximum()
                End If

                ' Enumeration values are plain strings.
                ' Similar to the example above, the pixel format is set to Mono8, this time without using a parameter list.
                If camera.Parameters(CType("PixelFormat", EnumName)).TrySetValue("Mono8") Then
                    Console.WriteLine("New PixelFormat  : {0}", camera.Parameters(CType("PixelFormat", EnumName)).GetValue())
                End If

                ' Restore the old pixel format.
                camera.Parameters(PLCamera.PixelFormat).SetValue(oldPixelFormat)

                ' Close the camera.
                camera.Close()
            End Using
        Catch e As Exception
            Console.[Error].WriteLine("Exception: {0}", e.Message)
            exitCode = 1
        End Try

        ' Comment the following two lines to disable waiting on exit.
        Console.[Error].WriteLine(vbLf & "Press enter to exit.")
        Console.ReadLine()

        Environment.[Exit](exitCode)
    End Sub
End Class

Parameterize Camera Autofunction#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample illustrates how to use the Auto Functions feature of Basler cameras.

    Features, like 'Gain', are named according to the Standard Feature Naming Convention (SFNC).
    The SFNC defines a common set of features, their behavior, and the related parameter names.
    This ensures the interoperability of cameras from different camera vendors.
    Current Basler USB ace and all Basler ace 2 camera models are based on SFNC version 2.0.
    Older Basler GigE cameras are based on previous SFNC versions.
    Accordingly, the behavior of these cameras and some parameter names will be different.
*/

using System;
using System.Collections.Generic;
using Basler.Pylon;

namespace ParameterizeCamera_AutoFunctions
{
    class ParameterizeCamera_AutoFunctions
    {
        private static Version sfnc2_0_0 = new Version(2, 0, 0);
        private static EnumName regionSelector;
        private static IntegerName regionSelectorWidth, regionSelectorHeight, regionSelectorOffsetX, regionSelectorOffsetY;
        private static String regionSelectorValue1, regionSelectorValue2;
        private static FloatName balanceRatio, exposureTime;
        private static BooleanName autoFunctionAOIROIUseBrightness, autoFunctionAOIROIUseWhiteBalance;

        static void Configure( Camera camera )
        {
            if (camera.GetSfncVersion() < sfnc2_0_0)  // Handling for older cameras
            {
                regionSelector = PLCamera.AutoFunctionAOISelector;
                regionSelectorOffsetX = PLCamera.AutoFunctionAOIOffsetX;
                regionSelectorOffsetY = PLCamera.AutoFunctionAOIOffsetY;
                regionSelectorWidth = PLCamera.AutoFunctionAOIWidth;
                regionSelectorHeight = PLCamera.AutoFunctionAOIHeight;
                regionSelectorValue1 = PLCamera.AutoFunctionAOISelector.AOI1;
                regionSelectorValue2 = PLCamera.AutoFunctionAOISelector.AOI2;
                balanceRatio = PLCamera.BalanceRatioAbs;
                exposureTime = PLCamera.ExposureTimeAbs;
                autoFunctionAOIROIUseBrightness = PLCamera.AutoFunctionAOIUsageIntensity;
                autoFunctionAOIROIUseWhiteBalance = PLCamera.AutoFunctionAOIUsageWhiteBalance;
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                regionSelector = PLCamera.AutoFunctionROISelector;
                regionSelectorOffsetX = PLCamera.AutoFunctionROIOffsetX;
                regionSelectorOffsetY = PLCamera.AutoFunctionROIOffsetY;
                regionSelectorWidth = PLCamera.AutoFunctionROIWidth;
                regionSelectorHeight = PLCamera.AutoFunctionROIHeight;
                regionSelectorValue1 = PLCamera.AutoFunctionROISelector.ROI1;
                regionSelectorValue2 = PLCamera.AutoFunctionROISelector.ROI2;
                balanceRatio = PLCamera.BalanceRatio;
                exposureTime = PLCamera.ExposureTime;
                autoFunctionAOIROIUseBrightness = PLCamera.AutoFunctionROIUseBrightness;
                autoFunctionAOIROIUseWhiteBalance = PLCamera.AutoFunctionROIUseWhiteBalance;
            }
        }

        // Check whether the camera is a color camera by testing whether any Bayer pixel format can be set.
        static bool IsColorCamera( Camera camera )
        {
            bool result = false;
            IEnumerable<String> entries = camera.Parameters[PLCamera.PixelFormat].GetAllValues();
            foreach (String ent in entries)
            {
                if (camera.Parameters[PLCamera.PixelFormat].CanSetValue( ent ))
                {
                    if (ent.Contains( "Bayer" ))
                    {
                        result = true;
                        break;
                    }
                }
            }
            return result;
        }

        static void AutoGainOnce( Camera camera )
        {
            // Check whether the gain auto function is available.
            if (!camera.Parameters[PLCamera.GainAuto].IsWritable)
            {
                Console.WriteLine( "The camera does not support Gain Auto." );
                return;
            }

            // Maximize the grabbed image area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetToMinimum();
            camera.Parameters[PLCamera.OffsetY].TrySetToMinimum();
            camera.Parameters[PLCamera.Width].SetToMaximum();
            camera.Parameters[PLCamera.Height].SetToMaximum();

            // Set the Auto Function ROI for luminance statistics.
            // We want to use ROI1 for gathering the statistics.
            if (camera.Parameters[autoFunctionAOIROIUseBrightness].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( true );// ROI 1 is used for brightness control
                camera.Parameters[regionSelector].SetValue( regionSelectorValue2 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( false );// ROI 2 is not used for brightness control
            }
            camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
            camera.Parameters[regionSelectorOffsetX].SetToMinimum();
            camera.Parameters[regionSelectorOffsetY].SetToMinimum();
            camera.Parameters[regionSelectorWidth].SetToMaximum();
            camera.Parameters[regionSelectorHeight].SetToMaximum();

            // We are going to try GainAuto = Once.
            Console.WriteLine( "Trying 'GainAuto = Once'." );
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                // Set the target value for luminance control. The value is always expressed
                // as an 8 bit value regardless of the current pixel data output format,
                // i.e., 0 -> black, 255 -> white.
                camera.Parameters[PLCamera.AutoTargetValue].SetValue( 80 );

                Console.WriteLine( "Initial Gain = {0}", camera.Parameters[PLCamera.GainRaw].GetValue() );
                // Set the gain ranges for luminance control.
                camera.Parameters[PLCamera.AutoGainRawLowerLimit].SetToMinimum();
                camera.Parameters[PLCamera.AutoGainRawUpperLimit].SetToMaximum();
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                // Set the target value for luminance control.
                // A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
                // A value of 0.4 means 40 % and so forth.
                camera.Parameters[PLCamera.AutoTargetBrightness].SetValue( 0.3 );

                Console.WriteLine( "Initial Gain = {0}", camera.Parameters[PLCamera.Gain].GetValue() );
                // Set the gain ranges for luminance control.
                camera.Parameters[PLCamera.AutoGainLowerLimit].SetToMinimum();
                camera.Parameters[PLCamera.AutoGainUpperLimit].SetToMaximum();
            }
            camera.Parameters[PLCamera.GainAuto].SetValue( PLCamera.GainAuto.Once );

            // When the "once" mode of operation is selected,
            // the parameter values are automatically adjusted until the related image property
            // reaches the target value. After the automatic parameter value adjustment is complete, the auto
            // function will automatically be set to "off" and the new parameter value will be applied to the
            // subsequently grabbed images.
            int n = 0;
            while (camera.Parameters[PLCamera.GainAuto].GetValue() != PLCamera.GainAuto.Off)
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage( 1, result );
                    }
                }
                n++;
                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep( 100 );

                //Make sure the loop is exited.
                if (n > 100)
                {
                    throw new TimeoutException( "The adjustment of auto gain did not finish." );
                }
            }

            Console.WriteLine( "GainAuto went back to 'Off' after {0} frames", n );
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                Console.WriteLine( "Final Gain = {0}", camera.Parameters[PLCamera.GainRaw].GetValue() );
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                Console.WriteLine( "Final Gain = {0}", camera.Parameters[PLCamera.Gain].GetValue() );
            }
        }


        static void AutoGainContinuous( Camera camera )
        {
            // Check whether the Gain Auto feature is available.
            if (!camera.Parameters[PLCamera.GainAuto].IsWritable)
            {
                Console.WriteLine( "The camera does not support Gain Auto." );
                return;
            }

            // Maximize the grabbed image area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetToMinimum();
            camera.Parameters[PLCamera.OffsetY].TrySetToMinimum();
            camera.Parameters[PLCamera.Width].SetToMaximum();
            camera.Parameters[PLCamera.Height].SetToMaximum();

            // Set the Auto Function ROI for luminance statistics.
            // We want to use ROI1 for gathering the statistics.
            if (camera.Parameters[autoFunctionAOIROIUseBrightness].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( true );// ROI 1 is used for brightness control
                camera.Parameters[regionSelector].SetValue( regionSelectorValue2 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( false );// ROI 2 is not used for brightness control
            }
            camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
            camera.Parameters[regionSelectorOffsetX].SetToMinimum();
            camera.Parameters[regionSelectorOffsetY].SetToMinimum();
            camera.Parameters[regionSelectorWidth].SetToMaximum();
            camera.Parameters[regionSelectorHeight].SetToMaximum();

            // We are trying GainAuto = Continuous.
            Console.WriteLine( "Trying 'GainAuto' = Continuous" );
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                // Set the target value for luminance control. The value is always expressed
                // as an 8 bit value regardless of the current pixel data output format,
                // i.e., 0 -> black, 255 -> white.
                camera.Parameters[PLCamera.AutoTargetValue].SetValue( 80 );

                Console.WriteLine( "Initial Gain = {0}", camera.Parameters[PLCamera.GainRaw].GetValue() );
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                // Set the target value for luminance control.
                // A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
                // A value of 0.4 means 40 % and so forth.
                camera.Parameters[PLCamera.AutoTargetBrightness].SetValue( 0.3 );

                Console.WriteLine( "Initial Gain = {0}", camera.Parameters[PLCamera.Gain].GetValue() );
            }
            camera.Parameters[PLCamera.GainAuto].SetValue( PLCamera.GainAuto.Continuous );

            // When "continuous" mode is selected, the parameter value is adjusted repeatedly while images are acquired.
            // Depending on the current frame rate, the automatic adjustments will usually be carried out for
            // every or every other image unless the camera's microcontroller is kept busy by other tasks.
            // The repeated automatic adjustment will proceed until the "once" mode of operation is used or
            // until the auto function is set to "off", in which case the parameter value resulting from the latest
            // automatic adjustment will operate unless the value is manually adjusted.
            for (int n = 0; n < 20; n++)            // For demonstration purposes, we will grab only 20 images.
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage( 1, result );
                    }
                }

                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep( 100 );
            }
            camera.Parameters[PLCamera.GainAuto].SetValue( PLCamera.GainAuto.Off ); // Switch off GainAuto.

            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                Console.WriteLine( "Final Gain = {0}", camera.Parameters[PLCamera.GainRaw].GetValue() );
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                Console.WriteLine( "Final Gain = {0}", camera.Parameters[PLCamera.Gain].GetValue() );
            }

        }


        static void AutoExposureOnce( Camera camera )
        {
            // Check whether auto exposure is available
            if (!camera.Parameters[PLCamera.ExposureAuto].IsWritable)
            {
                Console.WriteLine( "The camera does not support Exposure Auto." );
                return;
            }

            // Maximize the grabbed image area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetToMinimum();
            camera.Parameters[PLCamera.OffsetY].TrySetToMinimum();
            camera.Parameters[PLCamera.Width].SetToMaximum();
            camera.Parameters[PLCamera.Height].SetToMaximum();

            // Set the Auto Function ROI for luminance statistics.
            // We want to use ROI1 for gathering the statistics.
            if (camera.Parameters[autoFunctionAOIROIUseBrightness].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( true );// ROI 1 is used for brightness control
                camera.Parameters[regionSelector].SetValue( regionSelectorValue2 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( false );// ROI 2 is not used for brightness control
            }
            camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
            camera.Parameters[regionSelectorOffsetX].SetToMinimum();
            camera.Parameters[regionSelectorOffsetY].SetToMinimum();
            camera.Parameters[regionSelectorWidth].SetToMaximum();
            camera.Parameters[regionSelectorHeight].SetToMaximum();

            // Try ExposureAuto = Once.
            Console.WriteLine( "Trying 'ExposureAuto = Once'." );
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                // Set the target value for luminance control. The value is always expressed
                // as an 8 bit value regardless of the current pixel data output format,
                // i.e., 0 -> black, 255 -> white.
                camera.Parameters[PLCamera.AutoTargetValue].SetValue( 80 );

                Console.WriteLine( "Initial Exposure time = {0} us", camera.Parameters[PLCamera.ExposureTimeAbs].GetValue() );

                // Set the exposure time ranges for luminance control.
                camera.Parameters[PLCamera.AutoExposureTimeAbsLowerLimit].SetToMinimum();
                camera.Parameters[PLCamera.AutoExposureTimeAbsUpperLimit].SetToMaximum();
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                // Set the target value for luminance control.
                // A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
                // A value of 0.4 means 40 % and so forth.
                camera.Parameters[PLCamera.AutoTargetBrightness].SetValue( 0.3 );

                Console.WriteLine( "Initial Exposure time = {0} us", camera.Parameters[PLCamera.ExposureTime].GetValue() );

                // Set the exposure time ranges for luminance control.
                camera.Parameters[PLCamera.AutoExposureTimeLowerLimit].SetToMinimum();
                camera.Parameters[PLCamera.AutoExposureTimeUpperLimit].SetToMaximum();
            }
            camera.Parameters[PLCamera.ExposureAuto].SetValue( PLCamera.ExposureAuto.Once );

            // When the "once" mode of operation is selected,
            // the parameter values are automatically adjusted until the related image property
            // reaches the target value. After the automatic parameter value adjustment is complete, the auto
            // function will automatically be set to "off", and the new parameter value will be applied to the
            // subsequently grabbed images.
            int n = 0;
            while (camera.Parameters[PLCamera.ExposureAuto].GetValue() != PLCamera.ExposureAuto.Off)
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage( 1, result );
                    }
                }
                n++;

                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep( 100 );

                //Make sure the loop is exited.
                if (n > 100)
                {
                    throw new TimeoutException( "The adjustment of auto exposure did not finish." );
                }
            }
            Console.WriteLine( "ExposureAuto went back to 'Off' after {0} frames", n );
            Console.WriteLine( "Final Exposure Time = {0} us", camera.Parameters[exposureTime].GetValue() );
        }


        static void AutoExposureContinuous( Camera camera )
        {
            // Check whether the Exposure Auto feature is available.
            if (!camera.Parameters[PLCamera.ExposureAuto].IsWritable)
            {
                Console.WriteLine( "The camera does not support Exposure Auto." );
                return;
            }

            // Maximize the grabbed image area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetToMinimum();
            camera.Parameters[PLCamera.OffsetY].TrySetToMinimum();
            camera.Parameters[PLCamera.Width].SetToMaximum();
            camera.Parameters[PLCamera.Height].SetToMaximum();

            // Set the Auto Function ROI for luminance statistics.
            // We want to use ROI1 for gathering the statistics.
            if (camera.Parameters[autoFunctionAOIROIUseBrightness].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( true );// ROI 1 is used for brightness control
                camera.Parameters[regionSelector].SetValue( regionSelectorValue2 );
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( false );// ROI 2 is not used for brightness control
            }
            camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
            camera.Parameters[regionSelectorOffsetX].SetToMinimum();
            camera.Parameters[regionSelectorOffsetY].SetToMinimum();
            camera.Parameters[regionSelectorWidth].SetToMaximum();
            camera.Parameters[regionSelectorHeight].SetToMaximum();

            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                // Set the target value for luminance control. The value is always expressed
                // as an 8 bit value regardless of the current pixel data output format,
                // i.e., 0 -> black, 255 -> white.
                camera.Parameters[PLCamera.AutoTargetValue].SetValue( 80 );
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                // Set the target value for luminance control.
                // A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
                // A value of 0.4 means 40 % and so forth.
                camera.Parameters[PLCamera.AutoTargetBrightness].SetValue( 0.3 );
            }

            // Try ExposureAuto = Continuous.
            Console.WriteLine( "Trying 'ExposureAuto = Continuous'." );
            Console.WriteLine( "Initial Exposure time = {0} us", camera.Parameters[exposureTime].GetValue() );
            camera.Parameters[PLCamera.ExposureAuto].SetValue( PLCamera.ExposureAuto.Continuous );

            // When "continuous" mode is selected, the parameter value is adjusted repeatedly while images are acquired.
            // Depending on the current frame rate, the automatic adjustments will usually be carried out for
            // every or every other image, unless the camera's microcontroller is kept busy by other tasks.
            // The repeated automatic adjustment will proceed until the "once" mode of operation is used or
            // until the auto function is set to "off", in which case the parameter value resulting from the latest
            // automatic adjustment will operate unless the value is manually adjusted.
            for (int n = 0; n < 20; n++)    // For demonstration purposes, we will use only 20 images.
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage( 1, result );
                    }
                }

                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep( 100 );
            }
            camera.Parameters[PLCamera.ExposureAuto].SetValue( PLCamera.ExposureAuto.Off ); // Switch off Exposure Auto.

            Console.WriteLine( "Final Exposure Time = {0} us", camera.Parameters[exposureTime].GetValue() );
        }


        static void AutoWhiteBalance( Camera camera )
        {
            // Check whether the Balance White Auto feature is available.
            if (!camera.Parameters[PLCamera.BalanceWhiteAuto].IsWritable)
            {
                Console.WriteLine( "The Camera does not support balance white auto." );
                return;
            }

            // Maximize the grabbed area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetToMinimum();
            camera.Parameters[PLCamera.OffsetY].TrySetToMinimum();
            camera.Parameters[PLCamera.Width].SetToMaximum();
            camera.Parameters[PLCamera.Height].SetToMaximum();

            // Set the Auto Function ROI for white balance statistics.
            // We want to use ROI2 for gathering the statistics.
            if (camera.Parameters[regionSelector].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
                camera.Parameters[autoFunctionAOIROIUseWhiteBalance].SetValue( false );// ROI 1 is not used for white balance control
                camera.Parameters[regionSelector].SetValue( regionSelectorValue2 );
                camera.Parameters[autoFunctionAOIROIUseWhiteBalance].SetValue( true );// ROI 2 is used for white balance control
            }
            camera.Parameters[regionSelector].SetValue( regionSelectorValue2 );
            camera.Parameters[regionSelectorOffsetX].SetToMinimum();
            camera.Parameters[regionSelectorOffsetY].SetToMinimum();
            camera.Parameters[regionSelectorWidth].SetToMaximum();
            camera.Parameters[regionSelectorHeight].SetToMaximum();

            Console.WriteLine( "Trying 'BalanceWhiteAuto = Once'." );
            Console.WriteLine( "Initial balance ratio:" );
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Red );
            Console.Write( "R = {0}  ", camera.Parameters[balanceRatio].GetValue() );
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Green );
            Console.Write( "G = {0}  ", camera.Parameters[balanceRatio].GetValue() );
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Blue );
            Console.Write( "B = {0}  ", camera.Parameters[balanceRatio].GetValue() );
            camera.Parameters[PLCamera.BalanceWhiteAuto].SetValue( PLCamera.BalanceWhiteAuto.Once );

            // When the "once" mode of operation is selected,
            // the parameter values are automatically adjusted until the related image property
            // reaches the target value. After the automatic parameter value adjustment is complete, the auto
            // function will automatically be set to "off" and the new parameter value will be applied to the
            // subsequently grabbed images.
            int n = 0;
            while (camera.Parameters[PLCamera.BalanceWhiteAuto].GetValue() != PLCamera.BalanceWhiteAuto.Off)
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage( 1, result );
                    }
                }
                n++;

                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep( 100 );

                //Make sure the loop is exited.
                if (n > 100)
                {
                    throw new TimeoutException( "The adjustment of auto white balance did not finish." );
                }
            }
            Console.WriteLine( "BalanceWhiteAuto went back to 'Off' after {0} Frames", n );
            Console.WriteLine( "Final balance ratio: " );
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Red );
            Console.Write( "R = {0}  ", camera.Parameters[balanceRatio].GetValue() );
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Green );
            Console.Write( "G = {0}  ", camera.Parameters[balanceRatio].GetValue() );
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Blue );
            Console.Write( "B = {0}  ", camera.Parameters[balanceRatio].GetValue() );
        }


        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Before accessing camera device parameters, the camera must be opened.
                    camera.Open();

                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    Configure( camera );

                    // Check the device type
                    String deviceType = camera.CameraInfo[CameraInfoKey.DeviceType];
                    Console.WriteLine( "Testing {0} Camera Params:", deviceType );
                    Console.WriteLine( "==============================" );

                    // Disable test image generator if available
                    camera.Parameters[PLCamera.TestImageSelector].TrySetValue( PLCamera.TestImageSelector.Off );
                    camera.Parameters[PLCamera.TestPattern].TrySetValue( PLCamera.TestPattern.Off );

                    // Only area scan cameras support auto functions.
                    if (camera.Parameters[PLCamera.DeviceScanType].GetValue() == PLCamera.DeviceScanType.Areascan)
                    {
                        // All area scan cameras support luminance control.

                        // Carry out luminance control by using the "once" gain auto function.
                        // For demonstration purposes only, set the gain to an initial value.
                        if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                        {
                            camera.Parameters[PLCamera.GainRaw].SetToMaximum();
                        }
                        else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                        {
                            camera.Parameters[PLCamera.Gain].SetToMaximum();
                        }
                        AutoGainOnce( camera );

                        Console.WriteLine( "Press Enter to continue." );
                        Console.ReadLine();

                        // Carry out luminance control by using the "continuous" gain auto function.
                        // For demonstration purposes only, set the gain to an initial value.
                        if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                        {
                            camera.Parameters[PLCamera.GainRaw].SetToMaximum();
                        }
                        else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                        {
                            camera.Parameters[PLCamera.Gain].SetToMaximum();
                        }
                        AutoGainContinuous( camera );

                        Console.WriteLine( "Press Enter to continue." );
                        Console.ReadLine();

                        // For demonstration purposes only, set the exposure time to an initial value.
                        camera.Parameters[exposureTime].SetToMinimum();

                        // Carry out luminance control by using the "once" exposure auto function.
                        AutoExposureOnce( camera );
                        Console.WriteLine( "Press Enter to continue." );
                        Console.ReadLine();

                        // For demonstration purposes only, set the exposure time to an initial value.
                        camera.Parameters[exposureTime].SetToMinimum();

                        // Carry out luminance control by using the "continuous" exposure auto function.
                        AutoExposureContinuous( camera );

                        // Only color cameras support the balance white auto function.
                        if (IsColorCamera( camera ))
                        {
                            Console.WriteLine( "Press Enter to continue." );
                            Console.ReadLine();

                            // For demonstration purposes only, set the initial balance ratio values:
                            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Red );
                            camera.Parameters[balanceRatio].SetToMaximum();
                            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Green );
                            camera.Parameters[balanceRatio].TrySetValuePercentOfRange( 50.0 );
                            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue( PLCamera.BalanceRatioSelector.Blue );
                            camera.Parameters[balanceRatio].SetToMinimum();

                            // Carry out white balance using the balance white auto function.
                            AutoWhiteBalance( camera );
                        }
                    }
                    else
                    {
                        Console.WriteLine( "Only area scan cameras support auto functions." );
                    }

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine( "\nPress enter to exit." );
            Console.ReadLine();

            Environment.Exit( exitCode );
        }
    }
}

Parameterize Camera Automatic Image Adjustment#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample illustrates how to mimic the 'Automatic Image Adjustment' button of the pylon Viewer. 
    This allows you to configure optimum settings on the camera under the current conditions. 
    All of the parameters are described in the Basler Product Documentation.
*/

using System;
using Basler.Pylon;

namespace ParametrizeCamera_AutomaticImageAdjustment
{
    class ParametrizeCamera_AutomaticImageAdjustment
    {

        private static Version sfnc2_0_0 = new Version(2, 0, 0);
        private static EnumName regionSelector;
        private static BooleanName autoFunctionAOIROIUseWhiteBalance, autoFunctionAOIROIUseBrightness;
        private static IntegerName regionSelectorWidth, regionSelectorHeight, regionSelectorOffsetX, regionSelectorOffsetY;
        private static String regionSelectorValue1, regionSelectorValue2;


        public static void GetROINodeIdentifier( Camera camera )
        {
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                regionSelector = PLCamera.AutoFunctionAOISelector;
                regionSelectorOffsetX = PLCamera.AutoFunctionAOIOffsetX;
                regionSelectorOffsetY = PLCamera.AutoFunctionAOIOffsetY;
                regionSelectorWidth = PLCamera.AutoFunctionAOIWidth;
                regionSelectorHeight = PLCamera.AutoFunctionAOIHeight;
                regionSelectorValue1 = PLCamera.AutoFunctionAOISelector.AOI1;
                regionSelectorValue2 = PLCamera.AutoFunctionAOISelector.AOI2;
                autoFunctionAOIROIUseBrightness = PLCamera.AutoFunctionAOIUsageIntensity;
                autoFunctionAOIROIUseWhiteBalance = PLCamera.AutoFunctionAOIUsageWhiteBalance;
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                regionSelector = PLCamera.AutoFunctionROISelector;
                regionSelectorOffsetX = PLCamera.AutoFunctionROIOffsetX;
                regionSelectorOffsetY = PLCamera.AutoFunctionROIOffsetY;
                regionSelectorWidth = PLCamera.AutoFunctionROIWidth;
                regionSelectorHeight = PLCamera.AutoFunctionROIHeight;
                regionSelectorValue1 = PLCamera.AutoFunctionROISelector.ROI1;
                regionSelectorValue2 = PLCamera.AutoFunctionROISelector.ROI2;
                autoFunctionAOIROIUseBrightness = PLCamera.AutoFunctionROIUseBrightness;
                autoFunctionAOIROIUseWhiteBalance = PLCamera.AutoFunctionROIUseWhiteBalance;
            }
        }

        internal static void Main()
        {
            // the exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    Console.WriteLine( "Open the camera." );
                    // Open the connection to the camera device.
                    camera.Open();

                    Console.WriteLine( "Retrieve the node IDs for the SFNC version of the camera." );
                    GetROINodeIdentifier( camera );

                    // Set the pixel format to one of the formats compatible with this sample.
                    Console.WriteLine( "Set a pixel format that is compatible." );
                    string[] pixelFormats = new string[]
                    {
                        PLCamera.PixelFormat.YUV422_YUYV_Packed,
                        PLCamera.PixelFormat.YCbCr422_8,
                        PLCamera.PixelFormat.BayerBG8,
                        PLCamera.PixelFormat.BayerRG8,
                        PLCamera.PixelFormat.BayerGR8,
                        PLCamera.PixelFormat.BayerGB8,
                        PLCamera.PixelFormat.Mono8
                    };
                    camera.Parameters[PLCamera.PixelFormat].SetValue( pixelFormats );

                    // Disable test image generator if available
                    Console.WriteLine( "Disable test image generator." );
                    camera.Parameters[PLCamera.TestImageSelector].TrySetValue( PLCamera.TestImageSelector.Off );
                    camera.Parameters[PLCamera.TestPattern].TrySetValue( PLCamera.TestPattern.Off );

                    // Set the Auto Function ROI for luminance and white balance statistics.
                    // We want to use ROI1 for gathering the statistics.
                    Console.WriteLine( "Reset AOI/ROI settings to a default state." );
                    if (camera.Parameters[regionSelector].IsWritable)
                    {
                        camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
                        camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue( true );// ROI 1 is used for brightness control
                        camera.Parameters[autoFunctionAOIROIUseWhiteBalance].SetValue( true );// ROI 1 is used for white balance control
                    }
                    camera.Parameters[regionSelector].SetValue( regionSelectorValue1 );
                    camera.Parameters[regionSelectorOffsetX].SetValue( camera.Parameters[PLCamera.OffsetX].GetMinimum() );
                    camera.Parameters[regionSelectorOffsetY].SetValue( camera.Parameters[PLCamera.OffsetY].GetMinimum() );
                    camera.Parameters[regionSelectorWidth].SetValue( camera.Parameters[PLCamera.Width].GetMaximum() );
                    camera.Parameters[regionSelectorHeight].SetValue( camera.Parameters[PLCamera.Height].GetMaximum() );

                    // Set the acquisition parameters to automatic mode.
                    Console.WriteLine( "Set acquisition parameters (gamma, gain, exposure time...) to automatic mode." );
                    if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                    {
                        camera.Parameters[PLCamera.ProcessedRawEnable].TrySetValue( true );
                        camera.Parameters[PLCamera.GammaEnable].TrySetValue( true );
                        camera.Parameters[PLCamera.GammaSelector].TrySetValue( PLCamera.GammaSelector.sRGB );
                        camera.Parameters[PLCamera.AutoTargetValue].TrySetValue( 80 );
                        camera.Parameters[PLCamera.AutoFunctionProfile].TrySetValue( PLCamera.AutoFunctionProfile.GainMinimum );
                        camera.Parameters[PLCamera.AutoGainRawLowerLimit].TrySetToMinimum();
                        camera.Parameters[PLCamera.AutoGainRawUpperLimit].TrySetToMaximum();
                        camera.Parameters[PLCamera.AutoExposureTimeAbsLowerLimit].TrySetToMinimum();
                        camera.Parameters[PLCamera.AutoExposureTimeAbsUpperLimit].TrySetToMaximum();
                    }
                    else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                    {
                        camera.Parameters[PLCamera.AutoTargetBrightness].TrySetValue( 0.3 );
                        camera.Parameters[PLCamera.AutoFunctionProfile].TrySetValue( PLCamera.AutoFunctionProfile.MinimizeGain );
                        camera.Parameters[PLCamera.AutoGainLowerLimit].TrySetToMinimum();
                        camera.Parameters[PLCamera.AutoGainUpperLimit].TrySetToMaximum();
                        double maxExposure = camera.Parameters[PLCamera.AutoExposureTimeUpperLimit].GetMaximum();
                        // Reduce upper limit to one second for this example
                        if (maxExposure > 1000000)
                        {
                            maxExposure = 1000000;
                        }
                        camera.Parameters[PLCamera.AutoExposureTimeUpperLimit].TrySetValue( maxExposure );
                    }

                    // Set all auto functions to once in this example
                    camera.Parameters[PLCamera.GainSelector].TrySetValue( PLCamera.GainSelector.All );
                    camera.Parameters[PLCamera.GainAuto].TrySetValue( PLCamera.GainAuto.Once );
                    camera.Parameters[PLCamera.ExposureAuto].TrySetValue( PLCamera.ExposureAuto.Once );
                    camera.Parameters[PLCamera.BalanceWhiteAuto].TrySetValue( PLCamera.BalanceWhiteAuto.Once );
                    if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                    {
                        camera.Parameters[PLCamera.LightSourceSelector].TrySetValue( PLCamera.LightSourceSelector.Daylight );
                    }
                    else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                    {
                        camera.Parameters[PLCamera.LightSourcePreset].TrySetValue( PLCamera.LightSourcePreset.Daylight5000K );
                    }

                    Console.WriteLine( "Start acuisition of 20 images and display them, for a first check." );
                    camera.StreamGrabber.Start();
                    for (int n = 0; n < 20; n++)            // For demonstration purposes, we will grab "only" 20 images.
                    {
                        IGrabResult result = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                        using (result)
                        {
                            // Image grabbed successfully?
                            if (result.GrabSucceeded)
                            {
                                ImageWindow.DisplayImage( 1, result );
                            }
                        }

                        //For demonstration purposes only. Wait until the image is shown.
                        System.Threading.Thread.Sleep( 100 );
                    }

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}

Parameterize Camera Configurations#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/    
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the Migration topic in the pylon C# API documentation.

    This sample shows how to use configuration event handlers by applying the standard
    configurations and registering sample configuration event handlers.

    If a configuration event handler is registered, the registered methods are called
    when the state of the camera object changes, e.g., when the camera object is opened
    or closed. In pylon .NET, a configuration event handler is a method that parameterizes
    the camera.
*/

using System;
using System.Collections.Generic;
using Basler.Pylon;

namespace ParameterizeCamera_Configurations
{
    class ParameterizeCamera_Configurations
    {
        // Number of images to be grabbed.
        static int countOfImagesToGrab = 3;


        public static void PixelFormatAndAoiConfiguration( object sender, EventArgs e )
        {
            ICamera camera = sender as ICamera;
            camera.Parameters[PLCamera.OffsetX].TrySetToMinimum();
            camera.Parameters[PLCamera.OffsetY].TrySetToMinimum();

            camera.Parameters[PLCamera.Width].TrySetToMaximum();
            camera.Parameters[PLCamera.Height].TrySetToMaximum();

            camera.Parameters[PLCamera.PixelFormat].TrySetValue( PLCamera.PixelFormat.Mono8 );
        }


        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        static void AcquireContinuous( object sender, EventArgs e )
        {
            // Disable all trigger types.
            DisableAllTriggers( sender, e );

            // Disable compression.
            DisableCompression( sender, e );

            // Disable GenDC streaming.
            DisableGenDC( sender, e );

            // Set image component.
            SelectRangeComponent( sender, e );

            // set maximum packet size
            ProbePacketSize( sender, e );

            // Set acquisition mode to Continuous.
            ICamera camera = sender as ICamera;
            camera.Parameters[(EnumName)"AcquisitionMode"].SetValue( "Continuous" );
        }


        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        static void AcquireSingleFrame( object sender, EventArgs e )
        {
            // Disable all trigger types.
            DisableAllTriggers( sender, e );

            // Disable compression.
            DisableCompression( sender, e );

            // Disable GenDC streaming.
            DisableGenDC( sender, e );

            // Set image component.
            SelectRangeComponent( sender, e );

            // set maximum packet size
            ProbePacketSize( sender, e );

            // Set acquisition mode to SingleFrame.
            ICamera camera = sender as ICamera;
            camera.Parameters[(EnumName)"AcquisitionMode"].SetValue( "SingleFrame" );
        }


        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        public static void SoftwareTrigger( object sender, EventArgs e )
        {
            // Disable compression.
            DisableCompression( sender, e );

            // Disable GenDC streaming.
            DisableGenDC( sender, e );

            // Set image component.
            SelectRangeComponent( sender, e );

            // set maximum packet size
            ProbePacketSize( sender, e );

            ICamera camera = sender as ICamera;
            // Get required Enumerations.
            IEnumParameter triggerSelector = camera.Parameters [PLCamera.TriggerSelector];
            IEnumParameter triggerMode = camera.Parameters [PLCamera.TriggerMode];
            IEnumParameter triggerSource = camera.Parameters [PLCamera.TriggerSource];

            // Check the available camera trigger mode(s) to select the appropriate one: acquisition start trigger mode
            // (used by older cameras, i.e. for cameras supporting only the legacy image acquisition control mode;
            // do not confuse with acquisition start command) or frame start trigger mode
            // (used by newer cameras, i.e. for cameras using the standard image acquisition control mode;
            // equivalent to the acquisition start trigger mode in the legacy image acquisition control mode).
            string triggerName = "FrameStart";
            if (!triggerSelector.CanSetValue( triggerName ))
            {
                triggerName = "AcquisitionStart";
                if (!triggerSelector.CanSetValue( triggerName ))
                {
                    throw new NotSupportedException( "Could not select trigger. Neither FrameStart nor AcquisitionStart is available." );
                }
            }

            try
            {
                foreach (string trigger in triggerSelector)
                {
                    triggerSelector.SetValue( trigger );

                    if (triggerName == trigger)
                    {
                        // Activate trigger.
                        triggerMode.SetValue( PLCamera.TriggerMode.On );

                        // Set the trigger source to software.
                        triggerSource.SetValue( PLCamera.TriggerSource.Software );
                    }
                    else
                    {
                        // Turn trigger mode off.
                        triggerMode.SetValue( PLCamera.TriggerMode.Off );
                    }
                }
            }
            finally
            {
                // Set selector for software trigger.
                triggerSelector.SetValue( triggerName );
            }
            // Set acquisition mode to Continuous
            camera.Parameters[PLCamera.AcquisitionMode].SetValue( PLCamera.AcquisitionMode.Continuous );
        }



        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        static void DisableAllTriggers( object sender, EventArgs e )
        {
            ICamera camera = sender as ICamera;
            // Disable all trigger types.
            //------------------------------------------------------------------------------

            // Get required enumerations.
            IEnumParameter triggerSelector = camera.Parameters [PLCamera.TriggerSelector];
            IEnumParameter triggerMode = camera.Parameters [PLCamera.TriggerMode];

            // Remember previous selector value.
            string oldSelectorValue = triggerSelector.IsReadable ? triggerSelector.GetValue() : null;

            try
            {
                // Turn trigger mode off for all trigger selector entries.
                foreach (string trigger in triggerSelector)
                {
                    triggerSelector.SetValue( trigger );
                    triggerMode.SetValue( PLCamera.TriggerMode.Off );
                }
            }
            finally
            {
                // Restore previous selector.
                if (oldSelectorValue != null)
                {
                    triggerSelector.SetValue( oldSelectorValue );
                }
            }
            // Set acquisition mode to SingleFrame.
            camera.Parameters[PLCamera.AcquisitionMode].SetValue( PLCamera.AcquisitionMode.SingleFrame );
        }

        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        static void DisableCompression( object sender, EventArgs e )
        {
            ICamera camera = sender as ICamera;

            // Disable compression mode.
            //------------------------------------------------------------------------------

            // Get required enumeration.
            IEnumParameter compressionMode = camera.Parameters [PLCamera.ImageCompressionMode];

            if (compressionMode.IsWritable)
            {
                // Turn off compression mode.
                compressionMode.SetValue( "Off" );
            }
        }

        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        static void DisableGenDC( object sender, EventArgs e )
        {
            ICamera camera = sender as ICamera;

            // Get required enumeration GenDCStreamingMode
            IEnumParameter genDCMode = camera.Parameters [PLCamera.GenDCStreamingMode];

            if (genDCMode.IsWritable)
            {
                genDCMode.SetValue( "Off" );
            }
        }

        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        static void SelectRangeComponent( object sender, EventArgs e )
        {
            ICamera camera = sender as ICamera;

            IEnumParameter componentSelector = camera.Parameters[PLCamera.ComponentSelector];
            IBooleanParameter enableComponent = camera.Parameters[PLCamera.ComponentEnable];
            IEnumParameter pixelFormat = camera.Parameters[PLCamera.PixelFormat];

            // If multiple components are supported, enable only the 'Range' component.
            if (componentSelector.IsWritable)
            {
                string originalComponentValue = componentSelector.GetValue();

                //Iterate over all selector entries where CanSetValue() == true
                foreach( string entry in componentSelector )
                {
                    componentSelector.SetValue( entry );

                    // If the Range component is selected, enable it and set the pixel format to Mono8 or Mono16.
                    if (entry == "Range")
                    {
                        enableComponent.SetValue( true );
                        pixelFormat.TrySetValue( new[] { PLCamera.PixelFormat.Mono8, PLCamera.PixelFormat.Mono16 } );
                    }
                    else
                    {
                        // disable other components
                        enableComponent.SetValue( false );
                    }
                }

                // reset the component selector
                componentSelector.SetValue( originalComponentValue );
            }
        }
        // Shown here for demonstration purposes only to illustrate the effect of this configuration.
        static void ProbePacketSize(Object sender, EventArgs e )
        {
            ICamera  camera = sender as ICamera;

            // Get the ProbePacketSize command.
            ICommandParameter probePacketSize = camera.Parameters[(CommandName)"@StreamGrabber0/ProbePacketSize"];

            if ( probePacketSize.IsWritable )
            {
                probePacketSize.Execute();
            }
        }

        // OnImageGrabbed serves the same purpose as CImageEventPrinter in the C++ API:
        // it prints image information such as width and height.
        // It can be extended to implement other functionality for the image grabbed event.
        static void OnImageGrabbed( Object sender, ImageGrabbedEventArgs e )
        {
            if (e.GrabResult.GrabSucceeded)
            {
                Console.WriteLine( "SizeX:{0}", e.GrabResult.Width );
                Console.WriteLine( "SizeY:{0}", e.GrabResult.Height );
                byte[] pixelData = (byte[])e.GrabResult.PixelData;
                Console.WriteLine( "Gray value of first pixel:{0}", pixelData[0] );
            }
            else
            {
                Console.WriteLine( "Error Code: {0} Error Description: {1}", e.GrabResult.ErrorCode, e.GrabResult.ErrorDescription );
            }
        }

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    IGrabResult result;

                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Print the device type
                    String deviceType = camera.CameraInfo[CameraInfoKey.DeviceType];
                    Console.WriteLine( "Testing {0} Camera Params:", deviceType );
                    Console.WriteLine( "==============================" );

                    //Register handler for acquired images
                    camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;

                    Console.WriteLine( "Grab using continuous acquisition:" );

                    // Register the standard configuration event handler for setting up the camera for continuous acquisition.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    // The camera's Open() method calls the configuration handler's method that
                    // applies the required parameter modifications.
                    camera.Open();

                    // Grab some images for demonstration.
                    camera.StreamGrabber.Start( countOfImagesToGrab );
                    while (camera.StreamGrabber.IsGrabbing)
                    {
                        result = camera.StreamGrabber.RetrieveResult( 5000, TimeoutHandling.ThrowException );
                    }

                    // Close the camera
                    camera.Close();

                    //-------------------------------------------------------------

                    Console.WriteLine( "Grab using software trigger mode:" );

                    // Register the standard configuration event handler for setting up the camera for software
                    // triggering.
                    camera.CameraOpened += Configuration.SoftwareTrigger;

                    // The camera's Open() method calls the configuration handler's method that
                    // applies the required parameter modifications.
                    camera.Open();

                    // Check if camera supports waiting for trigger ready
                    if (camera.CanWaitForFrameTriggerReady)
                    {
                        // StreamGrabber.Start() calls the camera's Open() method automatically if the camera is not open yet.
                        // Open() calls the registered configuration event handler, which
                        // sets the required parameters for enabling software triggering.
                        // Grab some images for demonstration.

                        camera.StreamGrabber.Start( countOfImagesToGrab );
                        while (camera.StreamGrabber.IsGrabbing)
                        {
                            if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling.ThrowException ))
                            {
                                camera.ExecuteSoftwareTrigger();
                            }
                            result = camera.StreamGrabber.RetrieveResult( 5000, TimeoutHandling.ThrowException );
                        }
                    }
                    else
                    {
                        Console.WriteLine( "The software trigger sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger." );
                    }

                    //Close the camera
                    camera.Close();

                    //-------------------------------------------------------------

                    Console.WriteLine( "Grab using single frame acquisition:" );

                    // Register the standard configuration event handler for setting up the camera for
                    // single frame acquisition.
                    camera.CameraOpened += Configuration.AcquireSingleFrame;

                    // The camera's Open() method calls the configuration handler's method that
                    // applies the required parameter modifications.
                    camera.Open();

                    //Start multiple single grabs as configured.
                    result = camera.StreamGrabber.GrabOne( 5000, TimeoutHandling.ThrowException );
                    result = camera.StreamGrabber.GrabOne( 5000, TimeoutHandling.ThrowException );
                    result = camera.StreamGrabber.GrabOne( 5000, TimeoutHandling.ThrowException );
                    result = camera.StreamGrabber.GrabOne( 5000, TimeoutHandling.ThrowException );

                    //Close the camera
                    camera.Close();

                    //-------------------------------------------------------------

                    Console.WriteLine( "Grab using multiple configuration objects:" );

                    // Register the standard configuration event handler for setting up the camera for
                    // single frame acquisition and a custom event handler for pixel format and AOI configuration.
                    camera.CameraOpened += Configuration.AcquireSingleFrame;
                    camera.CameraOpened += PixelFormatAndAoiConfiguration;

                    // The camera's Open() method calls the configuration handler's method that
                    // applies the required parameter modifications.
                    camera.Open();

                    result = camera.StreamGrabber.GrabOne( 5000, TimeoutHandling.ThrowException );

                    //Close the camera
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine( "\nPress enter to exit." );
            Console.ReadLine();

            Environment.Exit( exitCode );
        }
    }
}

Parameterize Camera Load and Save#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample application demonstrates how to save or load the features of a camera
    to or from a file.
*/

using System;
using Basler.Pylon;

namespace ParameterizeCamera_LoadAndSave
{
    class ParameterizeCamera_LoadAndSave
    {
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            // The name of the pylon feature stream file.
            const string filename = "CameraParameters.pfs";

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Before accessing camera device parameters, the camera must be opened.
                    camera.Open();

                    Console.WriteLine( "Saving camera device parameters to file {0} ...", filename );
                    // Save the content of the camera device parameters in the file.
                    camera.Parameters.Save( filename, ParameterPath.CameraDevice );

                    Console.WriteLine( "Reading file {0} back to camera device parameters ...", filename );
                    // Just for demonstration, read the content of the file back to the camera device parameters.
                    camera.Parameters.Load( filename, ParameterPath.CameraDevice );

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine( "\nPress enter to exit." );
            Console.ReadLine();

            Environment.Exit( exitCode );
        }
    }
}

Parameterize Camera Lookup Table#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample program demonstrates the use of the Luminance Lookup Table feature.
*/

using System;
using System.Collections.Generic;
using Basler.Pylon;

namespace ParameterizeCamera_LookupTable
{
    class ParameterizeCamera_LookupTable
    {
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    Console.WriteLine( "Opening camera..." );
                    // Open the camera.
                    camera.Open();
                    Console.WriteLine( "Done\n" );

                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );
                    // Print the device type.
                    String deviceType = camera.CameraInfo[CameraInfoKey.DeviceType];
                    Console.WriteLine( "Testing {0} Camera Params:", deviceType );
                    Console.WriteLine( "==============================" );

                    Console.WriteLine( "Writing LUT...." );

                    // Select the lookup table using the LUTSelector.
                    camera.Parameters[PLCamera.LUTSelector].SetValue( PLCamera.LUTSelector.Luminance );

                    // Some cameras have 10 bit and others have 12 bit lookup tables, so determine
                    // the type of the lookup table for the current device.
                    int nValues = (int) camera.Parameters[PLCamera.LUTIndex].GetMaximum() + 1;
                    int inc = 0;
                    if (nValues == 4096) // 12 bit LUT.
                    {
                        inc = 8;
                    }
                    else if (nValues == 1024) // 10 bit LUT.
                    {
                        inc = 2;
                    }
                    else
                    {
                        throw new Exception( "Type of LUT is not supported by this sample." );
                    }

                    // Use LUTIndex and LUTValue parameter to access the lookup table values.
                    // The following lookup table causes an inversion of the sensor values.
                    for (int i = 0; i < nValues; i += inc)
                    {
                        camera.Parameters[PLCamera.LUTIndex].SetValue( i );
                        camera.Parameters[PLCamera.LUTValue].SetValue( nValues - 1 - i );
                    }
                    Console.WriteLine( "DONE" );

                    // Enable the lookup table.
                    camera.Parameters[PLCamera.LUTEnable].SetValue( true );
                    // Grab and process images here.
                    // ...

                    // Disable the lookup table.
                    camera.Parameters[PLCamera.LUTEnable].SetValue( false );
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine( "\nPress enter to exit." );
            Console.ReadLine();

            Environment.Exit( exitCode );
        }
    }
}

Parameterize Camera Usersets#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/

    Demonstrates how to use user configuration sets (user sets) and how to configure the camera
    to start up with the user defined settings of user set 1.

    You can also configure your camera using the pylon Viewer and
    store your custom settings in a user set of your choice.


    ATTENTION:
    Executing this sample will overwrite all current settings in user set 1.
*/

using System;
using Basler.Pylon;

namespace ParameterizeCamera_UserSets
{
    class ParameterizeCamera_UserSets
    {
        static Version sfnc2_0_0 = new Version(2, 0, 0);
        public static EnumName userDefaultSelector;
        public static string userDefaultSelectorUserSet1;

        public static void Configure( Camera camera )
        {
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                userDefaultSelector = PLCamera.UserSetDefaultSelector;
                userDefaultSelectorUserSet1 = PLCamera.UserSetDefaultSelector.UserSet1;
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                userDefaultSelector = PLCamera.UserSetDefault;
                userDefaultSelectorUserSet1 = PLCamera.UserSetDefault.UserSet1;
            }
        }
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Before accessing camera device parameters, the camera must be opened.
                    camera.Open();

                    Configure( camera );

                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Print the device type
                    String deviceType = camera.CameraInfo[CameraInfoKey.DeviceType];
                    Console.WriteLine( "Testing {0} Camera Params:", deviceType );

                    // Remember the current default user set selector so we can restore it later when cleaning up.
                    String oldDefaultUserSet = camera.Parameters[userDefaultSelector].GetValue();

                    // Load default settings.
                    Console.WriteLine( "Loading Default Params" );
                    camera.Parameters[PLCamera.UserSetSelector].SetValue( PLCamera.UserSetSelector.Default );
                    camera.Parameters[PLCamera.UserSetLoad].Execute();

                    // Set gain and exposure time values.
                    // The camera won't let you set specific values when related auto functions are active.
                    // So we need to disable the related auto functions before setting the values.
                    Console.WriteLine( "Turning off Gain Auto and Exposure Auto" );
                    camera.Parameters[PLCamera.GainAuto].TrySetValue( PLCamera.GainAuto.Off );
                    if (camera.GetSfncVersion() < sfnc2_0_0 && camera.Parameters[PLCamera.GainRaw].IsReadable) // Handling for older cameras
                    {
                        camera.Parameters[PLCamera.GainRaw].TrySetValue( camera.Parameters[PLCamera.GainRaw].GetMinimum() );
                    }
                    else if(camera.Parameters[PLCamera.Gain].IsReadable) // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                    {
                        camera.Parameters[PLCamera.Gain].TrySetValue( camera.Parameters[PLCamera.Gain].GetMinimum() );
                    }
                    camera.Parameters[PLCamera.ExposureAuto].TrySetValue( PLCamera.ExposureAuto.Off );
                    if (camera.GetSfncVersion() < sfnc2_0_0 && camera.Parameters[PLCamera.ExposureTimeRaw].IsReadable) // Handling for older cameras
                    {
                        camera.Parameters[PLCamera.ExposureTimeRaw].TrySetValue( camera.Parameters[PLCamera.ExposureTimeRaw].GetMinimum() );
                    }
                    else if (camera.Parameters[PLCamera.ExposureTime].IsReadable) // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                    {
                        camera.Parameters[PLCamera.ExposureTime].TrySetValue( camera.Parameters[PLCamera.ExposureTime].GetMinimum() );
                    }

                    // Save to user set 1.
                    //
                    // ATTENTION:
                    // This will overwrite all settings previously saved in user set 1.
                    Console.WriteLine( "Saving Currently Active Settings to user set 1" );
                    camera.Parameters[PLCamera.UserSetSelector].SetValue( PLCamera.UserSetSelector.UserSet1 );
                    camera.Parameters[PLCamera.UserSetSave].Execute();

                    // Load Default Settings.
                    Console.WriteLine( "Loading default settings." );
                    camera.Parameters[PLCamera.UserSetSelector].SetValue( PLCamera.UserSetSelector.Default );
                    camera.Parameters[PLCamera.UserSetLoad].Execute();

                    // Show Default Settings.
                    Console.WriteLine( "Default settings" );
                    Console.WriteLine( "================" );
                    if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                    {
                        if (camera.Parameters[PLCamera.GainRaw].IsReadable)
                        {
                            Console.WriteLine( "Gain                :{0}", camera.Parameters[PLCamera.GainRaw].GetValue() );
                        }
                        Console.WriteLine( "Exposure Time       :{0}", camera.Parameters[PLCamera.ExposureTimeRaw].GetValue() );
                    }
                    else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                    {
                        if (camera.Parameters[PLCamera.Gain].IsReadable)
                        {
                            Console.WriteLine( "Gain                :{0}", camera.Parameters[PLCamera.Gain].GetValue() );
                        }
                        Console.WriteLine( "Exposure Time       :{0}", camera.Parameters[PLCamera.ExposureTime].GetValue() );
                    }

                    // Show User Set 1 settings.
                    Console.WriteLine( "Loading User set 1 Settings." );
                    camera.Parameters[PLCamera.UserSetSelector].SetValue( PLCamera.UserSetSelector.UserSet1 );
                    camera.Parameters[PLCamera.UserSetLoad].TryExecute();
                    Console.WriteLine( "User Set 1 Settings" );
                    Console.WriteLine( "===================" );
                    if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                    {
                        if (camera.Parameters[PLCamera.GainRaw].IsReadable)
                        {
                            Console.WriteLine( "Gain                :{0}", camera.Parameters[PLCamera.GainRaw].GetValue() );
                        }
                        Console.WriteLine( "Exposure Time       :{0}", camera.Parameters[PLCamera.ExposureTimeRaw].GetValue() );
                    }
                    else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                    {
                        if (camera.Parameters[PLCamera.Gain].IsReadable)
                        {
                            Console.WriteLine( "Gain                :{0}", camera.Parameters[PLCamera.Gain].GetValue() );
                        }
                        Console.WriteLine( "Exposure Time       :{0}", camera.Parameters[PLCamera.ExposureTime].GetValue() );
                    }

                    // Set user set 1 as default user set:
                    // When the camera wakes up it will be configured
                    // with the settings from user set 1.
                    camera.Parameters[userDefaultSelector].TrySetValue( userDefaultSelectorUserSet1 );

                    // Restore the default user set selector.
                    camera.Parameters[userDefaultSelector].TrySetValue( oldDefaultUserSet );

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine( "\nPress enter to exit." );
            Console.ReadLine();

            Environment.Exit( exitCode );
        }
    }
}

Pylon Live View#

The pylon Live View sample demonstrates how to build a simple GUI application using Windows Forms. It shows how to access and change camera parameters using controls, how to generate a device list, and how to show grabbed images.


Live View Sample
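
The following minimal console sketch outlines the same building blocks without the Windows Forms plumbing: enumerating devices for a device list, reading and writing a camera parameter as a GUI control would, and displaying grabbed images. It assumes a camera using SFNC 2.0 (e.g., a USB3 Vision camera) for the ExposureTime parameter; the namespace and the number of grabbed images are illustrative only.

C#

using System;
using Basler.Pylon;

namespace LiveViewSketch
{
    class LiveViewSketch
    {
        internal static void Main()
        {
            // Generate a device list, as the Live View sample does for its camera selection control.
            foreach (ICameraInfo info in CameraFinder.Enumerate())
            {
                Console.WriteLine( "{0} ({1})", info[CameraInfoKey.FriendlyName], info[CameraInfoKey.SerialNumber] );
            }

            // Open the first camera found. The GUI sample opens the device selected in the list instead.
            using (Camera camera = new Camera())
            {
                camera.CameraOpened += Configuration.AcquireContinuous;
                camera.Open();

                // Read and write a parameter, as a slider or text box in the GUI would.
                Console.WriteLine( "Current exposure time: {0}", camera.Parameters[PLCamera.ExposureTime].GetValue() );
                camera.Parameters[PLCamera.ExposureTime].TrySetValue( camera.Parameters[PLCamera.ExposureTime].GetMinimum() );

                // Grab a few images and show them. The GUI sample draws them into a form instead.
                camera.StreamGrabber.Start( 10 );
                while (camera.StreamGrabber.IsGrabbing)
                {
                    IGrabResult result = camera.StreamGrabber.RetrieveResult( 5000, TimeoutHandling.ThrowException );
                    using (result)
                    {
                        if (result.GrabSucceeded)
                        {
                            ImageWindow.DisplayImage( 1, result );
                        }
                    }
                }

                camera.Close();
            }
        }
    }
}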

GUI Multi-Camera#

The GUI Multi-Camera sample demonstrates how to build a simple GUI application using multiple cameras.


Sample Multi Cam
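
As a rough outline of the multi-camera handling, the sketch below creates one Camera object per enumerated device, opens them all, and grabs a few images from each in turn. It assumes the Camera constructor that takes an ICameraInfo; a GUI application would additionally grab on separate threads and give each camera its own display and controls, which is omitted here. The namespace and image counts are illustrative only.

C#

using System;
using System.Collections.Generic;
using Basler.Pylon;

namespace MultiCameraSketch
{
    class MultiCameraSketch
    {
        internal static void Main()
        {
            List<Camera> cameras = new List<Camera>();

            try
            {
                // Create and open one Camera object per camera device found.
                foreach (ICameraInfo info in CameraFinder.Enumerate())
                {
                    Camera camera = new Camera( info );
                    camera.CameraOpened += Configuration.AcquireContinuous;
                    camera.Open();
                    cameras.Add( camera );
                    Console.WriteLine( "Opened {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );
                }

                // Grab a few images from each camera in turn.
                foreach (Camera camera in cameras)
                {
                    camera.StreamGrabber.Start( 5 );
                    while (camera.StreamGrabber.IsGrabbing)
                    {
                        IGrabResult result = camera.StreamGrabber.RetrieveResult( 5000, TimeoutHandling.ThrowException );
                        using (result)
                        {
                            if (result.GrabSucceeded)
                            {
                                Console.WriteLine( "{0}: grabbed a {1} x {2} image.", camera.CameraInfo[CameraInfoKey.ModelName], result.Width, result.Height );
                            }
                        }
                    }
                    camera.StreamGrabber.Stop();
                }
            }
            finally
            {
                // Close and dispose of all cameras.
                foreach (Camera camera in cameras)
                {
                    camera.Close();
                    camera.Dispose();
                }
            }
        }
    }
}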

Creating AVI Video Files#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

   This sample illustrates how to create a video file in Audio Video Interleave (AVI) format.
*/

using System;
using Basler.Pylon;

namespace Grab
{
    class Grab
    {
        const int countOfImagesToGrab = 100;
        const string videoFilename = "Utility_GrabAvi.avi";

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Open the connection to the camera device.
                    camera.Open();

                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Optional: Depending on your camera, computer, and codec choice, you may not be able
                    // to save a video without losing frames. Therefore, we limit the resolution:
                    camera.Parameters[PLCamera.Width].TrySetValue( 640, IntegerValueCorrection.Nearest );
                    camera.Parameters[PLCamera.Height].TrySetValue( 480, IntegerValueCorrection.Nearest );
                    camera.Parameters[PLCamera.PixelFormat].TrySetValue( PLCamera.PixelFormat.Mono8 );

                    // We also increase the number of memory buffers to be used while grabbing.
                    camera.Parameters[PLCameraInstance.MaxNumBuffer].SetValue( 20 );

                    // Create and open the AviVideoWriter.
                    using (AviVideoWriter writer = new AviVideoWriter())
                    {
                        // This will create an uncompressed file.
                        // If you want to use a specific codec, you should call an overload where you can
                        // pass the four-character code of the codec you want to use or pass preset compression options
                        // using the compressionOptions parameter.
                        writer.Create( videoFilename, 25, camera );

                        // Start grabbing.
                        camera.StreamGrabber.Start( countOfImagesToGrab );

                        Console.WriteLine( "Please wait. Images are being grabbed." );

                        while (camera.StreamGrabber.IsGrabbing)
                        {
                            // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                            IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                            using (grabResult)
                            {
                                // Image grabbed successfully?
                                if (grabResult.GrabSucceeded)
                                {
                                    // Write the image to the .avi file.
                                    writer.Write( grabResult );
                                }
                                else
                                {
                                    Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                                }
                            }
                        }

                        // Stop grabbing.
                        camera.StreamGrabber.Stop();

                        // Close the .avi file.
                        writer.Close();
                    }

                    // Close the connection to the camera device.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}

Creating MPEG4 Video Files#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

   This sample illustrates how to create a video file in MP4 format.
*/

using System;
using Basler.Pylon;

namespace Grab
{
    class Grab
    {
        const int countOfImagesToGrab = 100;
        const string videoFilename = "Utility_GrabVideo.mp4";

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            // Check if VideoWriter is supported and all required DLLs are available.
            if (!VideoWriter.IsSupported)
            {
                Console.WriteLine( "VideoWriter is not supported at the moment. Please install the pylon Supplementary Package for MPEG-4 which is available on the Basler website." );
                // Return with error code 1.
                Environment.Exit( 1 );
            }

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Open the connection to the camera device.
                    camera.Open();

                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Optional: Depending on your camera or computer, you may not be able to save
                    // a video without losing frames. Therefore, we limit the resolution:
                    camera.Parameters[PLCamera.Width].TrySetValue( 640, IntegerValueCorrection.Nearest );
                    camera.Parameters[PLCamera.Height].TrySetValue( 480, IntegerValueCorrection.Nearest );
                    camera.Parameters[PLCamera.PixelFormat].TrySetValue( PLCamera.PixelFormat.Mono8 );

                    // We also increase the number of memory buffers to be used while grabbing.
                    camera.Parameters[PLCameraInstance.MaxNumBuffer].SetValue( 20 );

                    // Create and open the VideoWriter.
                    using (VideoWriter writer = new VideoWriter())
                    {
                        // Set a quality of 90 for the video (value range is 1 to 100).
                        writer.Parameters[PLVideoWriter.Quality].SetValue( 90 );

                        // This will create a compressed video file.
                        writer.Create( videoFilename, 25, camera );

                        // Start grabbing.
                        camera.StreamGrabber.Start( countOfImagesToGrab );

                        Console.WriteLine( "Please wait. Images are being grabbed." );

                        while (camera.StreamGrabber.IsGrabbing)
                        {
                            // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                            IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                            using (grabResult)
                            {
                                // Image grabbed successfully?
                                if (grabResult.GrabSucceeded)
                                {
                                    // Write the image to the video file.
                                    writer.Write( grabResult );
                                }
                                else
                                {
                                    Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                                }
                            }
                        }

                        // Stop grabbing.
                        camera.StreamGrabber.Stop();

                        // Close the video file.
                        writer.Close();
                    }

                    // Close the connection to the camera device.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}

Configure the IP address of a camera device#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample demonstrates how to configure the IP address of a camera device.
*/

using System;
using System.Collections.Generic;
using Basler.Pylon;


namespace Utility_IpConfig
{
    class Utility_IpConfig
    {
        internal static void Main( string[] args )
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                List<ICameraInfo> deviceList = IpConfigurator.EnumerateAllDevices();

                if (args.Length < 2)
                {
                    // Print usage information.
                    Console.WriteLine( "Usage: Utility_IpConfig <MAC> <IP> [MASK] [GATEWAY]" );
                    Console.WriteLine( "       <MAC> is the MAC address without separators, e.g., 0030531596CF" );
                    Console.WriteLine( "       <IP> is one of the following:" );
                    Console.WriteLine( "            - AUTO to use Auto-IP (LLA)." );
                    Console.WriteLine( "            - DHCP to use DHCP." );
                    Console.WriteLine( "            - Everything else is interpreted as a new IP address in dotted notation, e.g., 192.168.1.1" );
                    Console.WriteLine( "       [MASK] is the network mask in dotted notation. This is optional. 255.255.255.0 is used as default." );
                    Console.WriteLine( "       [GATEWAY] is the gateway address in dotted notation. This is optional. 0.0.0.0 is used as default." );
                    Console.WriteLine( "Please note that this is a sample and no sanity checks are made." );
                    Console.WriteLine( "" );
                    Console.WriteLine( String.Format( "{0,-103}{1,-15}", "Available Devices", "   supports " ) );
                    Console.WriteLine( String.Format( "{0,-32}{1,-14}{2,-17}{3,-17}{4,-13}{5,-9}{6,-5}{7,-6}{8,-5}",
                                                "Friendly Name", "MAC", "IP Address", "Subnet Mask", "Gateway", "Mode", "IP?", "DHCP?", "LLA?" ) );

                    foreach (var device in deviceList)
                    {
                        // Determine currently active configuration method.
                        String currentConfig;
                        if (IpConfigurator.IsPersistentIpActive( device ))
                        {
                            currentConfig = "StaticIP";
                        }
                        else if (IpConfigurator.IsDhcpActive( device ))
                        {
                            currentConfig = "DHCP";
                        }
                        else if (IpConfigurator.IsAutoIpActive( device ))
                        {
                            currentConfig = "AutoIP";
                        }
                        else
                        {
                            currentConfig = "Unknown";
                        }

                        Console.WriteLine( String.Format( "{0,-32}{1,-14}{2,-17}{3,-17}{4,-13}{5,-9}{6,-5}{7,-6}{8,-5}",
                                device[CameraInfoKey.FriendlyName], device[CameraInfoKey.DeviceMacAddress], device[CameraInfoKey.DeviceIpAddress],
                                device[CameraInfoKey.SubnetMask], device[CameraInfoKey.DefaultGateway], currentConfig,
                                IpConfigurator.IsPersistentIpSupported( device ), IpConfigurator.IsDhcpSupported( device ), IpConfigurator.IsAutoIpSupported( device ) ) );
                    }
                    exitCode = 1;
                }
                else
                {
                    // Read arguments. Note that sanity checks are skipped for clarity.
                    String macAddress = args[0];
                    String ipAddress = args[1];
                    String subnetMask = "255.255.255.0";
                    if (args.Length >= 3)
                    {
                        subnetMask = args[2];
                    }
                    String defaultGateway = "0.0.0.0";
                    if (args.Length >= 4)
                    {
                        defaultGateway = args[3];
                    }

                    // Check if configuration mode is AUTO, DHCP, or IP address.
                    bool isAuto = args[1].Equals("AUTO");
                    bool isDhcp = args[1].Equals("DHCP");
                    IpConfigurationMethod configurationMethod = IpConfigurationMethod.StaticIP;
                    if (isAuto)
                    {
                        configurationMethod = IpConfigurationMethod.AutoIP;
                    }
                    else if (isDhcp)
                    {
                        configurationMethod = IpConfigurationMethod.DHCP;
                    }

                    // Find the camera's user-defined name.
                    String userDefinedName = "";
                    foreach (var device in deviceList)
                    {
                        if (macAddress == device[CameraInfoKey.DeviceMacAddress])
                        {
                            userDefinedName = device[CameraInfoKey.UserDefinedName];
                        }
                    }
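                    // Note: The user-defined name is retrieved here for demonstration purposes only;
                    // it is not used further in this sample.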

                    // Set new IP configuration.
                    bool setOk = false;
                    if (configurationMethod == IpConfigurationMethod.StaticIP)
                    {
                        setOk = IpConfigurator.ChangeIpConfiguration( macAddress, configurationMethod, ipAddress, subnetMask, defaultGateway );
                    }
                    else
                    {
                        setOk = IpConfigurator.ChangeIpConfiguration( macAddress, configurationMethod );
                    }

                    if (setOk)
                    {
                        Console.WriteLine( "Successfully changed IP configuration via broadcast for device {0} to {1}.", macAddress, ipAddress );
                    }
                    else
                    {
                        Console.WriteLine( "Failed to change IP configuration via broadcast for device {0}.", macAddress );
                        Console.WriteLine( "This is not an error. The device may not support broadcast IP configuration." );
                    }
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
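
A typical invocation, using the MAC address format from the usage text above, might look like Utility_IpConfig 0030531596CF 192.168.1.1 255.255.255.0. As a minimal, hypothetical follow-up (not part of the original sample), the result of the configuration call could be reported by enumerating the devices again; the sketch below uses only calls that already appear in the sample and assumes it is placed after the ChangeIpConfiguration() call, where macAddress is in scope.

// Sketch only: re-enumerate and report the address the device announces now.
// Depending on the configuration method, the device may need a moment before
// the new settings become visible on the network.
foreach (ICameraInfo device in IpConfigurator.EnumerateAllDevices())
{
    if (macAddress == device[CameraInfoKey.DeviceMacAddress])
    {
        Console.WriteLine( "Device {0} now reports IP address {1}.",
                           macAddress, device[CameraInfoKey.DeviceIpAddress] );
    }
}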

Working with a camera device behind a router#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon .NET API documentation: https://docs.baslerweb.com/pylonapi/net/ 

    This sample illustrates how to work with a device behind a router using the 
    AnnounceRemoteDevice() function. For more information, read the documentation of the IpConfigurator class.

    ATTENTION: This sample only works with GigE devices. 
*/

using System;
using System.Collections.Generic;
using Basler.Pylon;


namespace Utility_AnnounceRemoteDevice
{
    class Utility_AnnounceRemoteDevice
    {
        internal static void PrintCameraList( String headline )
        {
            List<ICameraInfo> deviceList = CameraFinder.Enumerate();

            Console.WriteLine( "Available Devices " + headline );
            Console.WriteLine( String.Format( "{0,-32}{1,-14}{2,-17}{3,-17}{4,-15}{5,-8}",
                                            "Friendly Name", "MAC", "IP Address", "Subnet Mask", "Gateway", "Mode" ) );

            foreach (var device in deviceList)
            {
                // Determine currently active configuration method
                String currentConfig;
                if (IpConfigurator.IsPersistentIpActive( device ))
                {
                    currentConfig = "StaticIP";
                }
                else if (IpConfigurator.IsDhcpActive( device ))
                {
                    currentConfig = "DHCP";
                }
                else if (IpConfigurator.IsAutoIpActive( device ))
                {
                    currentConfig = "AutoIP";
                }
                else
                {
                    currentConfig = "Unknown";
                }

                Console.WriteLine( String.Format( "{0,-32}{1,-14}{2,-17}{3,-17}{4,-15}{5,-8}",
                            device[CameraInfoKey.FriendlyName], device[CameraInfoKey.DeviceMacAddress], device[CameraInfoKey.DeviceIpAddress],
                            device[CameraInfoKey.SubnetMask], device[CameraInfoKey.DefaultGateway], currentConfig ) );
            }
        }

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            // The IP address of a GigE camera device behind a router
            String ipAddress = "10.1.1.1";

            // Keep a pylon object so that devices can be announced safely.
            // Check the documentation for AnnounceRemoteDevice() for details.
            Library lib = new Library();

            try
            {
                // Camera list at start - the camera device behind the router is not visible
                PrintCameraList( "(at start)" );

                IpConfigurator.AnnounceRemoteDevice( ipAddress );

                // Camera list after announce call - the camera device behind the router is visible
                PrintCameraList( "(after AnnounceRemoteDevice)" );

                IpConfigurator.RenounceRemoteDevice( ipAddress );

                // Camera list after renounce call - the camera device behind the router is not visible
                PrintCameraList( "(after RenounceRemoteDevice)" );
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
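
Once AnnounceRemoteDevice() has been called, the device appears in the regular enumeration and can be opened like any locally visible camera. The sketch below is an assumption rather than part of the original sample: it selects the announced device by its IP address, presumes a Camera constructor that accepts an ICameraInfo for selecting a specific device, and is meant to sit between the announce and renounce calls, where ipAddress is in scope.

// Sketch only: open the device announced under ipAddress.
foreach (ICameraInfo info in CameraFinder.Enumerate())
{
    if (ipAddress == info[CameraInfoKey.DeviceIpAddress])
    {
        // Assumption: this constructor overload selects the given device.
        using (Camera camera = new Camera( info ))
        {
            camera.Open();
            Console.WriteLine( "Opened announced device {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );
            camera.Close();
        }
    }
}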

Decompressing Images Grabbed With Basler Compression Beyond#

C#

/*
    Note: Before getting started, Basler recommends reading the Programmer's Guide topic
    in the pylon C# API documentation: https://docs.baslerweb.com/pylonapi/net/

    This sample illustrates how to enable the Compression Beyond feature in Basler cameras and
    how to decompress images using the ImageDecompressor class.
*/


using System;
using Basler.Pylon;

namespace Utility_ImageDecompressor
{
    class Utility_ImageDecompressor
    {
        static void PrintCompressionInfo( CompressionInfo compressionInfo )
        {
            Console.WriteLine( "\n\nCompression info:" );
            Console.WriteLine( "HasCompressedImage: {0}", compressionInfo.HasCompressedImage );
            Console.WriteLine( "CompressionStatus: {0} ({0:D})", compressionInfo.CompressionStatus );
            Console.WriteLine( "Lossy: {0}", compressionInfo.Lossy );
            Console.WriteLine( "Width: {0}", compressionInfo.Width );
            Console.WriteLine( "Height: {0}", compressionInfo.Height );
            Console.WriteLine( "PixelType: {0} ({0:D}) ", compressionInfo.PixelType );
            Console.WriteLine( "DecompressedImageSize: {0}", compressionInfo.DecompressedImageSize );
            Console.WriteLine( "DecompressedPayloadSize: {0}", compressionInfo.DecompressedPayloadSize );
        }

        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine( "Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName] );

                    // Set the acquisition mode to single frame acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireSingleFrame;

                    // Open the connection to the camera device.
                    camera.Open();

                    // Remember the original compression mode.
                    string oldCompressionMode = camera.Parameters[PLCamera.ImageCompressionMode].GetValue();

                    // Set the compression mode to BaslerCompressionBeyond if available.
                    camera.Parameters[PLCamera.ImageCompressionMode].SetValue( PLCamera.ImageCompressionMode.BaslerCompressionBeyond );
                    // After enabling the compression, we can read the compression rate option.
                    string oldCompressionRateOption = camera.Parameters[PLCamera.ImageCompressionRateOption].GetValue();
                    // Configure lossless compression.
                    camera.Parameters[PLCamera.ImageCompressionRateOption].SetValue( PLCamera.ImageCompressionRateOption.Lossless );

                    // Create the decompressor and initialize it with the camera.
                    using (ImageDecompressor decompressor = new ImageDecompressor( camera ))
                    {
                        // Wait max. 5000ms for a new image.
                        IGrabResult grabResult = camera.StreamGrabber.GrabOne( 5000 );
                        using (grabResult)
                        {
                            if (grabResult.GrabSucceeded)
                            {
                                // Fetch compression info and check whether the image was compressed by the camera.
                                CompressionInfo compressionInfo = new CompressionInfo();
                                if (ImageDecompressor.GetCompressionInfo( ref compressionInfo, grabResult ))
                                {
                                    // Print content of CompressionInfo.
                                    PrintCompressionInfo( compressionInfo );

                                    // Check if we have a valid compressed image
                                    if (compressionInfo.CompressionStatus == CompressionStatus.Ok)
                                    {
                                        // Show compression ratio.
                                        Console.WriteLine( "\nTransferred compressed payload: {0}", grabResult.PayloadSize );
                                        Console.WriteLine( "Compression ratio: {0:N2}%", (Single)grabResult.PayloadSize / (Single)compressionInfo.DecompressedPayloadSize * 100.0 );

                                        // Create buffer for storing the decompressed image.
                                        var myBuffer = new Byte [compressionInfo.DecompressedImageSize];

                                        // Decompress the image.
                                        decompressor.DecompressImage( myBuffer, grabResult );

                                        // Show the image.
                                        ImageWindow.DisplayImage( 1, myBuffer, compressionInfo.PixelType, compressionInfo.Width, compressionInfo.Height, 0, ImageOrientation.TopDown );
                                    }
                                    else
                                    {
                                        Console.WriteLine( "There was an error while the camera was compressing the image." );
                                    }
                                }
                            }
                            else
                            {
                                // Somehow image grabbing failed.
                                Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                            }
                        }

                        Console.WriteLine( "\n\n--- Switching to Fix Ratio compression ---" );

                        // Take another picture with lossy compression (if available).
                        if (camera.Parameters[PLCamera.ImageCompressionRateOption].TrySetValue( PLCamera.ImageCompressionRateOption.FixRatio ))
                        {
                            // After changing the compression parameters, the decompressor MUST be reconfigured.
                            decompressor.SetCompressionDescriptor( camera );

                            // Wait max. 5000ms for a new image.
                            grabResult = camera.StreamGrabber.GrabOne( 5000 );
                            using (grabResult)
                            {
                                if (grabResult.GrabSucceeded)
                                {
                                    // Fetch compression info and check whether the image was compressed by the camera.
                                    CompressionInfo compressionInfo = new CompressionInfo();
                                    if (ImageDecompressor.GetCompressionInfo( ref compressionInfo, grabResult ))
                                    {
                                        // Print content of CompressionInfo.
                                        PrintCompressionInfo( compressionInfo );

                                        // Check if we have a valid compressed image
                                        if (compressionInfo.CompressionStatus == CompressionStatus.Ok)
                                        {
                                            // Show compression ratio.
                                            Console.WriteLine( "\nTransferred compressed payload: {0}", grabResult.PayloadSize );
                                            Console.WriteLine( "Compression ratio: {0:N2}%", (Single)grabResult.PayloadSize / (Single)compressionInfo.DecompressedPayloadSize * 100.0 );

                                            // Create buffer for storing the decompressed image.
                                            var myBuffer = new Byte [compressionInfo.DecompressedImageSize];

                                            // Decompress the image.
                                            decompressor.DecompressImage( myBuffer, grabResult );

                                            // Show the image.
                                            ImageWindow.DisplayImage( 2, myBuffer, compressionInfo.PixelType, compressionInfo.Width, compressionInfo.Height, 0, ImageOrientation.TopDown );
                                        }
                                        else
                                        {
                                            Console.WriteLine( "There was an error while the camera was compressing the image." );
                                        }
                                    }
                                }
                                else
                                {
                                    // Somehow image grabbing failed.
                                    Console.WriteLine( "Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription );
                                }
                            }
                        }
                        else
                        {
                            Console.WriteLine( "With this setting the camera does not support the \"FixRatio\" Image Compression Rate Option." );
                        }
                    }
                    // Restore the old camera settings.
                    camera.Parameters[PLCamera.ImageCompressionRateOption].SetValue( oldCompressionRateOption );
                    camera.Parameters[PLCamera.ImageCompressionMode].SetValue( oldCompressionMode );

                    camera.Close();
                }
            }
            catch (InvalidOperationException e)
            {
                Console.Error.WriteLine( "Exception: Camera does not support Compression. {0}", e.Message );
                exitCode = 1;
            }
            catch (Exception e)
            {
                Console.Error.WriteLine( "Exception: {0}", e.Message );
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine( "\nPress enter to exit." );
                Console.ReadLine();
            }

            Environment.Exit( exitCode );
        }
    }
}
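
The sample enables Compression Beyond with SetValue(), which throws if the camera does not support the feature; the InvalidOperationException handler above reports exactly this case. A gentler variant, sketched here as an alternative (not part of the original sample), uses the TrySetValue() call already shown for the rate option so that an unsupported camera is skipped instead of raising an exception. It would replace the SetValue() call inside the using block, where camera is in scope.

// Sketch only: enable Compression Beyond only if the camera supports it.
if (camera.Parameters[PLCamera.ImageCompressionMode].TrySetValue( PLCamera.ImageCompressionMode.BaslerCompressionBeyond ))
{
    // Compression is available; configure lossless compression as in the sample.
    camera.Parameters[PLCamera.ImageCompressionRateOption].SetValue( PLCamera.ImageCompressionRateOption.Lossless );
}
else
{
    Console.WriteLine( "This camera device does not support the Compression Beyond feature." );
}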